--- a/appobject.py Wed Jun 08 15:11:45 2011 +0200
+++ b/appobject.py Wed Jun 08 17:08:00 2011 +0200
@@ -180,12 +180,12 @@
return self.__class__.__name__
def search_selector(self, selector):
- """search for the given selector or selector instance in the selectors
- tree. Return it of None if not found
+ """search for the given selector, selector instance or tuple of
+ selectors in the selectors tree. Return None if not found.
"""
if self is selector:
return self
- if isinstance(selector, type) and isinstance(self, selector):
+ if isinstance(selector, (type, tuple)) and isinstance(self, selector):
return self
return None
@@ -250,8 +251,8 @@
return merged_selectors
def search_selector(self, selector):
- """search for the given selector or selector instance in the selectors
- tree. Return it of None if not found
+ """search for the given selector or selector instance (or tuple of
+ selectors) in the selectors tree. Return None if not found
"""
for childselector in self.selectors:
if childselector is selector:
@@ -259,7 +260,8 @@
found = childselector.search_selector(selector)
if found is not None:
return found
- return None
+ # if not found in children, maybe we are looking for self?
+ return super(MultiSelector, self).search_selector(selector)
class AndSelector(MultiSelector):
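
(Usage sketch, not part of the patch: search_selector now accepts the same
forms as isinstance's second argument, so a tuple matches any of the listed
selector classes. The selector names below are hypothetical placeholders.)

    # SelA / SelB are hypothetical selector classes
    found = obj.__select__.search_selector((SelA, SelB))
    if found is not None:
        # a selector of either class takes part in obj.__select__
        print 'matched', found
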
--- a/cwvreg.py Wed Jun 08 15:11:45 2011 +0200
+++ b/cwvreg.py Wed Jun 08 17:08:00 2011 +0200
@@ -194,17 +194,18 @@
_ = unicode
from warnings import warn
+from datetime import datetime, date, time, timedelta
from logilab.common.decorators import cached, clear_cache
from logilab.common.deprecation import deprecated, class_deprecated
from logilab.common.modutils import cleanup_sys_modules
from rql import RQLHelper
+from yams.constraints import BASE_CONVERTERS
from cubicweb import (ETYPE_NAME_MAP, Binary, UnknownProperty, UnknownEid,
ObjectNotFound, NoSelectableObject, RegistryNotFound,
CW_EVENT_MANAGER)
-from cubicweb.utils import dump_class
from cubicweb.vregistry import VRegistry, Registry, class_regid, classid
from cubicweb.rtags import RTAGS
@@ -368,7 +369,10 @@
# make a copy even if cls.__regid__ == etype, else we may have problems
# for client applications using multiple connections to different
# repositories (eg shingouz)
- cls = dump_class(cls, etype)
+ # __autogenerated__ attribute is just a marker
+ cls = type(str(etype), (cls,), {'__autogenerated__': True,
+ '__doc__': cls.__doc__,
+ '__module__': cls.__module__})
cls.__regid__ = etype
cls.__initialize__(self.schema)
return cls
@@ -412,10 +416,8 @@
if not isinstance(view, class_deprecated)]
try:
view = self._select_best(views, req, rset=rset, **kwargs)
- if view.linkable():
+ if view is not None and view.linkable():
yield view
- except NoSelectableObject:
- continue
except Exception:
self.exception('error while trying to select %s view for %s',
vid, rset)
@@ -849,24 +851,15 @@
return self['views'].select(__vid, req, rset=rset, **kwargs)
-import decimal
-from datetime import datetime, date, time, timedelta
-
-YAMS_TO_PY = { # XXX unify with yams.constraints.BASE_CONVERTERS?
- 'String' : unicode,
- 'Bytes': Binary,
- 'Password': str,
-
- 'Boolean': bool,
- 'Int': int,
- 'Float': float,
- 'Decimal': decimal.Decimal,
-
+# XXX unify with yams.constraints.BASE_CONVERTERS?
+YAMS_TO_PY = BASE_CONVERTERS.copy()
+YAMS_TO_PY.update({
+ 'Bytes': Binary,
'Date': date,
'Datetime': datetime,
'TZDatetime': datetime,
'Time': time,
'TZTime': time,
'Interval': timedelta,
- }
+ })
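
(Aside on the type() call replacing dump_class above: the three-argument form
builds the subclass dynamically. A self-contained sketch, with illustrative
names:)

    class Base(object):
        """docstring propagated to the generated class"""

    etype = u'MyEtype'
    cls = type(str(etype), (Base,), {'__autogenerated__': True,
                                     '__doc__': Base.__doc__,
                                     '__module__': Base.__module__})
    assert issubclass(cls, Base) and cls.__name__ == 'MyEtype'
    # str() matters: python 2 class names must be str, not unicode
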
--- a/dataimport.py Wed Jun 08 15:11:45 2011 +0200
+++ b/dataimport.py Wed Jun 08 17:08:00 2011 +0200
@@ -445,14 +445,14 @@
ObjectStore.__init__(self)
if session is None:
sys.exit('please provide a session or run this script with cubicweb-ctl shell and pass cnx as session')
- if not hasattr(session, 'set_pool'):
+ if not hasattr(session, 'set_cnxset'):
# connection
cnx = session
session = session.request()
- session.set_pool = lambda : None
+ session.set_cnxset = lambda : None
commit = commit or cnx.commit
else:
- session.set_pool()
+ session.set_cnxset()
self.session = session
self._commit = commit or session.commit
@@ -462,7 +462,7 @@
def commit(self):
txuuid = self._commit()
- self.session.set_pool()
+ self.session.set_cnxset()
return txuuid
def rql(self, *args):
@@ -642,7 +642,9 @@
for k, v in kwargs.iteritems():
kwargs[k] = getattr(v, 'eid', v)
entity, rels = self.metagen.base_etype_dicts(etype)
+ # make a copy to keep cached entity pristine
entity = copy(entity)
+ entity.cw_edited = copy(entity.cw_edited)
entity.cw_clear_relation_cache()
self.metagen.init_entity(entity)
entity.cw_edited.update(kwargs, skipsec=False)
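
(Why the second copy above is needed: copy.copy is shallow, so the cached
entity's cw_edited would otherwise be shared and mutated in place. A sketch
with a plain dict standing in for the real EditedEntity:)

    from copy import copy

    class Entity(object):
        pass

    template = Entity()
    template.cw_edited = {}            # stands in for EditedEntity

    e1 = copy(template)                # shallow: cw_edited is *shared*
    e1.cw_edited['name'] = u'x'
    print template.cw_edited           # {u'name': u'x'} -- template polluted

    template.cw_edited = {}
    e2 = copy(template)
    e2.cw_edited = copy(e2.cw_edited)  # copy the mutable attribute too
    e2.cw_edited['name'] = u'y'
    print template.cw_edited           # {} -- template stays pristine
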
--- a/devtools/__init__.py Wed Jun 08 15:11:45 2011 +0200
+++ b/devtools/__init__.py Wed Jun 08 17:08:00 2011 +0200
@@ -28,15 +28,17 @@
import pickle
import glob
import warnings
+import hashlib
from datetime import timedelta
from os.path import (abspath, join, exists, basename, dirname, normpath, split,
isfile, isabs, splitext, isdir, expanduser)
from functools import partial
-import hashlib
from logilab.common.date import strptime
from logilab.common.decorators import cached, clear_cache
-from cubicweb import CW_SOFTWARE_ROOT, ConfigurationError, schema, cwconfig, BadConnectionId
+
+from cubicweb import ConfigurationError, ExecutionError, BadConnectionId
+from cubicweb import CW_SOFTWARE_ROOT, schema, cwconfig
from cubicweb.server.serverconfig import ServerConfiguration
from cubicweb.etwist.twconfig import TwistedConfiguration
@@ -91,7 +93,7 @@
""" Idea: this is less costly than a full re-creation of the repo object.
off:
* sessions are closed,
- * pools are closed
+ * cnxsets are closed
* system source is shutdown
"""
if not repo._needs_refresh:
@@ -102,8 +104,8 @@
repo.close(sessionid)
except BadConnectionId: #this is strange ? thread issue ?
print 'XXX unknown session', sessionid
- for pool in repo.pools:
- pool.close(True)
+ for cnxset in repo.cnxsets:
+ cnxset.close(True)
repo.system_source.shutdown()
repo._needs_refresh = True
repo._has_started = False
@@ -111,12 +113,12 @@
def turn_repo_on(repo):
"""Idea: this is less costly than a full re-creation of the repo object.
on:
- * pools are connected
+ * cnxsets are connected
* caches are cleared
"""
if repo._needs_refresh:
- for pool in repo.pools:
- pool.reconnect()
+ for cnxset in repo.cnxsets:
+ cnxset.reconnect()
repo._type_source_cache = {}
repo._extid_cache = {}
repo.querier._rql_cache = {}
@@ -197,7 +199,10 @@
directory from which tests are launched or by specifying an alternative
sources file using self.sourcefile.
"""
- sources = super(TestServerConfiguration, self).sources()
+ try:
+ sources = super(TestServerConfiguration, self).sources()
+ except ExecutionError:
+ sources = {}
if not sources:
sources = DEFAULT_SOURCES
if 'admin' not in sources:
@@ -207,9 +212,6 @@
# web config methods needed here for cases when we use this config as a web
# config
- def instance_md5_version(self):
- return ''
-
def default_base_url(self):
return BASE_URL
@@ -475,12 +477,11 @@
repo = self.get_repo(startup=True)
cnx = self.get_cnx()
session = repo._sessions[cnx.sessionid]
- session.set_pool()
+ session.set_cnxset()
_commit = session.commit
- def always_pooled_commit():
- _commit()
- session.set_pool()
- session.commit = always_pooled_commit
+ def keep_cnxset_commit():
+ _commit(free_cnxset=False)
+ session.commit = keep_cnxset_commit
pre_setup_func(session, self.config)
session.commit()
cnx.close()
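
(Equivalent, slightly terser spelling of keep_cnxset_commit, assuming commit()
accepts the free_cnxset keyword as used above:)

    from functools import partial
    # the bound method is captured before the instance attribute shadows it
    session.commit = partial(session.commit, free_cnxset=False)
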
--- a/devtools/fake.py Wed Jun 08 15:11:45 2011 +0200
+++ b/devtools/fake.py Wed Jun 08 17:08:00 2011 +0200
@@ -138,13 +138,15 @@
class FakeSession(RequestSessionBase):
- read_security = write_security = True
- set_read_security = set_write_security = lambda *args, **kwargs: None
- def __init__(self, repo=None, user=None):
+ def __init__(self, repo=None, user=None, vreg=None):
self.repo = repo
- self.vreg = getattr(self.repo, 'vreg', CubicWebVRegistry(FakeConfig(), initlog=False))
- self.pool = FakePool()
+ if vreg is None:
+ vreg = getattr(self.repo, 'vreg', None)
+ if vreg is None:
+ vreg = CubicWebVRegistry(FakeConfig(), initlog=False)
+ self.vreg = vreg
+ self.cnxset = FakeConnectionsSet()
self.user = user or FakeUser()
self.is_internal_session = False
self.transaction_data = {}
@@ -162,6 +164,13 @@
def set_entity_cache(self, entity):
pass
+ # for use with enabled_security context manager
+ read_security = write_security = True
+ def init_security(self, *args):
+ return None, None
+ def reset_security(self, *args):
+ return
+
class FakeRepo(object):
querier = None
def __init__(self, schema, vreg=None, config=None):
@@ -201,6 +210,6 @@
self.uri = uri
-class FakePool(object):
+class FakeConnectionsSet(object):
def source(self, uri):
return FakeSource(uri)
--- a/devtools/fill.py Wed Jun 08 15:11:45 2011 +0200
+++ b/devtools/fill.py Wed Jun 08 17:08:00 2011 +0200
@@ -275,9 +275,6 @@
:param choice_func: a function that takes an entity type, an attrname and
returns acceptable values for this attribute
"""
- # XXX HACK, remove or fix asap
- if etype in set(('String', 'Int', 'Float', 'Boolean', 'Date', 'CWGroup', 'CWUser')):
- return []
queries = []
for index in xrange(entity_num):
restrictions = []
--- a/devtools/repotest.py Wed Jun 08 15:11:45 2011 +0200
+++ b/devtools/repotest.py Wed Jun 08 17:08:00 2011 +0200
@@ -205,7 +205,7 @@
self.ueid = self.session.user.eid
assert self.ueid != -1
self.repo._type_source_cache = {} # clear cache
- self.pool = self.session.set_pool()
+ self.cnxset = self.session.set_cnxset()
self.maxeid = self.get_max_eid()
do_monkey_patch()
self._dumb_sessions = []
@@ -213,7 +213,7 @@
def get_max_eid(self):
return self.session.execute('Any MAX(X)')[0][0]
def cleanup(self):
- self.session.set_pool()
+ self.session.set_cnxset()
self.session.execute('DELETE Any X WHERE X eid > %s' % self.maxeid)
def tearDown(self):
@@ -225,7 +225,7 @@
for session in self._dumb_sessions:
session.rollback()
session.close()
- self.repo._free_pool(self.pool)
+ self.repo._free_cnxset(self.cnxset)
assert self.session.user.eid != -1
def set_debug(self, debug):
@@ -263,7 +263,8 @@
u = self.repo._build_user(self.session, self.session.user.eid)
u._groups = set(groups)
s = Session(u, self.repo)
- s._threaddata.pool = self.pool
+ s._threaddata.cnxset = self.cnxset
+ s._threaddata.ctx_count = 1
# register session to ensure it gets closed
self._dumb_sessions.append(s)
return s
@@ -273,7 +274,7 @@
def commit(self):
self.session.commit()
- self.session.set_pool()
+ self.session.set_cnxset()
class BasePlannerTC(BaseQuerierTC):
@@ -287,7 +288,7 @@
# XXX source_defs
self.o = self.repo.querier
self.session = self.repo._sessions.values()[0]
- self.pool = self.session.set_pool()
+ self.cnxset = self.session.set_cnxset()
self.schema = self.o.schema
self.sources = self.o._repo.sources
self.system = self.sources[-1]
@@ -311,7 +312,7 @@
del self.repo.sources_by_uri[source.uri]
undo_monkey_patch()
for session in self._dumb_sessions:
- session._threaddata.pool = None
+ session._threaddata.cnxset = None
session.close()
def _prepare_plan(self, rql, kwargs=None):
--- a/devtools/testlib.py Wed Jun 08 15:11:45 2011 +0200
+++ b/devtools/testlib.py Wed Jun 08 17:08:00 2011 +0200
@@ -274,7 +274,7 @@
def session(self):
"""return current server side session (using default manager account)"""
session = self.repo._sessions[self.cnx.sessionid]
- session.set_pool()
+ session.set_cnxset()
return session
@property
@@ -458,7 +458,7 @@
try:
return self.cnx.commit()
finally:
- self.session.set_pool() # ensure pool still set after commit
+ self.session.set_cnxset() # ensure cnxset still set after commit
@nocoverage
def rollback(self):
@@ -467,7 +467,7 @@
except dbapi.ProgrammingError:
pass # connection closed
finally:
- self.session.set_pool() # ensure pool still set after commit
+ self.session.set_cnxset() # ensure cnxset still set after commit
# # server side db api #######################################################
@@ -475,7 +475,7 @@
if eid_key is not None:
warn('[3.8] eid_key is deprecated, you can safely remove this argument',
DeprecationWarning, stacklevel=2)
- self.session.set_pool()
+ self.session.set_cnxset()
return self.session.execute(rql, args)
# other utilities #########################################################
@@ -500,6 +500,10 @@
it2 = set(getattr(x, 'eid', x) for x in it2)
super(CubicWebTC, self).assertItemsEqual(it1, it2, *args, **kwargs)
+ def assertMessageEqual(self, req, params, expected_msg):
+ msg = req.session.data[params['_cwmsgid']]
+ self.assertEqual(msg, expected_msg)
+
# workflow utilities #######################################################
def assertPossibleTransitions(self, entity, expected):
@@ -568,6 +572,8 @@
if views:
try:
view = viewsvreg._select_best(views, req, rset=rset)
+ if view is None:
+ raise NoSelectableObject((req,), {'rset':rset}, views)
if view.linkable():
yield view
else:
@@ -722,7 +728,7 @@
self.assertEqual(session.login, origsession.login)
self.assertEqual(session.anonymous_session, False)
self.assertEqual(path, 'view')
- self.assertEqual(params, {'__message': 'welcome %s !' % req.user.login})
+ self.assertMessageEqual(req, params, 'welcome %s !' % req.user.login)
def assertAuthFailure(self, req, nbsessions=0):
self.app.connect(req)
--- a/entities/test/unittest_wfobjs.py Wed Jun 08 15:11:45 2011 +0200
+++ b/entities/test/unittest_wfobjs.py Wed Jun 08 17:08:00 2011 +0200
@@ -165,7 +165,7 @@
user = self.user()
iworkflowable = user.cw_adapt_to('IWorkflowable')
iworkflowable.fire_transition('deactivate', comment=u'deactivate user')
- user.clear_all_caches()
+ user.cw_clear_all_caches()
self.assertEqual(iworkflowable.state, 'deactivated')
self._test_manager_deactivate(user)
trinfo = self._test_manager_deactivate(user)
@@ -192,7 +192,7 @@
self.commit()
iworkflowable.fire_transition('wake up')
self.commit()
- user.clear_all_caches()
+ user.cw_clear_all_caches()
self.assertEqual(iworkflowable.state, 'deactivated')
# XXX test managers can change state without matching transition
@@ -274,14 +274,14 @@
self.assertEqual(iworkflowable.subworkflow_input_transition(), None)
iworkflowable.fire_transition('swftr1', u'go')
self.commit()
- group.clear_all_caches()
+ group.cw_clear_all_caches()
self.assertEqual(iworkflowable.current_state.eid, swfstate1.eid)
self.assertEqual(iworkflowable.current_workflow.eid, swf.eid)
self.assertEqual(iworkflowable.main_workflow.eid, mwf.eid)
self.assertEqual(iworkflowable.subworkflow_input_transition().eid, swftr1.eid)
iworkflowable.fire_transition('tr1', u'go')
self.commit()
- group.clear_all_caches()
+ group.cw_clear_all_caches()
self.assertEqual(iworkflowable.current_state.eid, state2.eid)
self.assertEqual(iworkflowable.current_workflow.eid, mwf.eid)
self.assertEqual(iworkflowable.main_workflow.eid, mwf.eid)
@@ -295,10 +295,10 @@
# force back to state1
iworkflowable.change_state('state1', u'gadget')
iworkflowable.fire_transition('swftr1', u'au')
- group.clear_all_caches()
+ group.cw_clear_all_caches()
iworkflowable.fire_transition('tr2', u'chapeau')
self.commit()
- group.clear_all_caches()
+ group.cw_clear_all_caches()
self.assertEqual(iworkflowable.current_state.eid, state3.eid)
self.assertEqual(iworkflowable.current_workflow.eid, mwf.eid)
self.assertEqual(iworkflowable.main_workflow.eid, mwf.eid)
@@ -390,7 +390,7 @@
):
iworkflowable.fire_transition(trans)
self.commit()
- group.clear_all_caches()
+ group.cw_clear_all_caches()
self.assertEqual(iworkflowable.state, nextstate)
@@ -408,11 +408,11 @@
wf.add_state('asleep', initial=True)
self.execute('SET X custom_workflow WF WHERE X eid %(x)s, WF eid %(wf)s',
{'wf': wf.eid, 'x': self.member.eid})
- self.member.clear_all_caches()
+ self.member.cw_clear_all_caches()
iworkflowable = self.member.cw_adapt_to('IWorkflowable')
self.assertEqual(iworkflowable.state, 'activated')# no change before commit
self.commit()
- self.member.clear_all_caches()
+ self.member.cw_clear_all_caches()
self.assertEqual(iworkflowable.current_workflow.eid, wf.eid)
self.assertEqual(iworkflowable.state, 'asleep')
self.assertEqual(iworkflowable.workflow_history, ())
@@ -429,7 +429,7 @@
self.execute('SET X custom_workflow WF WHERE X eid %(x)s, WF eid %(wf)s',
{'wf': wf.eid, 'x': self.member.eid})
self.commit()
- self.member.clear_all_caches()
+ self.member.cw_clear_all_caches()
self.assertEqual(iworkflowable.current_workflow.eid, wf.eid)
self.assertEqual(iworkflowable.state, 'asleep')
self.assertEqual(parse_hist(iworkflowable.workflow_history),
@@ -472,10 +472,10 @@
self.commit()
self.execute('DELETE X custom_workflow WF WHERE X eid %(x)s, WF eid %(wf)s',
{'wf': wf.eid, 'x': self.member.eid})
- self.member.clear_all_caches()
+ self.member.cw_clear_all_caches()
self.assertEqual(iworkflowable.state, 'asleep')# no change before commit
self.commit()
- self.member.clear_all_caches()
+ self.member.cw_clear_all_caches()
self.assertEqual(iworkflowable.current_workflow.name, "default user workflow")
self.assertEqual(iworkflowable.state, 'activated')
self.assertEqual(parse_hist(iworkflowable.workflow_history),
@@ -504,13 +504,13 @@
self.execute('SET X custom_workflow WF WHERE X eid %(x)s, WF eid %(wf)s',
{'wf': wf.eid, 'x': user.eid})
self.commit()
- user.clear_all_caches()
+ user.cw_clear_all_caches()
self.assertEqual(iworkflowable.state, 'asleep')
self.assertEqual([t.name for t in iworkflowable.possible_transitions()],
['rest'])
iworkflowable.fire_transition('rest')
self.commit()
- user.clear_all_caches()
+ user.cw_clear_all_caches()
self.assertEqual(iworkflowable.state, 'asleep')
self.assertEqual([t.name for t in iworkflowable.possible_transitions()],
['rest'])
@@ -520,7 +520,7 @@
self.commit()
iworkflowable.fire_transition('rest')
self.commit()
- user.clear_all_caches()
+ user.cw_clear_all_caches()
self.assertEqual(iworkflowable.state, 'dead')
self.assertEqual(parse_hist(iworkflowable.workflow_history),
[('asleep', 'asleep', 'rest', None),
@@ -557,7 +557,7 @@
def setUp(self):
CubicWebTC.setUp(self)
self.wf = self.session.user.cw_adapt_to('IWorkflowable').current_workflow
- self.session.set_pool()
+ self.session.set_cnxset()
self.s_activated = self.wf.state_by_name('activated').eid
self.s_deactivated = self.wf.state_by_name('deactivated').eid
self.s_dummy = self.wf.add_state(u'dummy').eid
@@ -629,13 +629,13 @@
iworkflowable = user.cw_adapt_to('IWorkflowable')
iworkflowable.fire_transition('deactivate')
cnx.commit()
- session.set_pool()
+ session.set_cnxset()
with self.assertRaises(ValidationError) as cm:
iworkflowable.fire_transition('deactivate')
self.assertEqual(self._cleanup_msg(cm.exception.errors['by_transition-subject']),
u"transition isn't allowed from")
cnx.rollback()
- session.set_pool()
+ session.set_cnxset()
# get back now
iworkflowable.fire_transition('activate')
cnx.commit()
--- a/entities/wfobjs.py Wed Jun 08 15:11:45 2011 +0200
+++ b/entities/wfobjs.py Wed Jun 08 17:08:00 2011 +0200
@@ -326,8 +326,8 @@
result[ep.subwf_state.eid] = ep.destination and ep.destination.eid
return result
- def clear_all_caches(self):
- super(WorkflowTransition, self).clear_all_caches()
+ def cw_clear_all_caches(self):
+ super(WorkflowTransition, self).cw_clear_all_caches()
clear_cache(self, 'exit_points')
--- a/entity.py Wed Jun 08 15:11:45 2011 +0200
+++ b/entity.py Wed Jun 08 17:08:00 2011 +0200
@@ -942,7 +942,7 @@
assert role
self._cw_related_cache.pop('%s_%s' % (rtype, role), None)
- def clear_all_caches(self): # XXX cw_clear_all_caches
+ def cw_clear_all_caches(self):
"""flush all caches on this entity. Further attributes/relations access
will triggers new database queries to get back values.
@@ -1024,6 +1024,10 @@
# deprecated stuff #########################################################
+ @deprecated('[3.13] use entity.cw_clear_all_caches()')
+ def clear_all_caches(self):
+ return self.cw_clear_all_caches()
+
@deprecated('[3.9] use entity.cw_attr_value(attr)')
def get_value(self, name):
return self.cw_attr_value(name)
--- a/etwist/server.py Wed Jun 08 15:11:45 2011 +0200
+++ b/etwist/server.py Wed Jun 08 17:08:00 2011 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -17,14 +17,19 @@
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
"""twisted server for CubicWeb web instances"""
+from __future__ import with_statement
+
__docformat__ = "restructuredtext en"
import sys
import os
+import os.path as osp
import select
import errno
import traceback
import threading
+import re
+import hashlib
from os.path import join
from time import mktime
from datetime import date, timedelta
@@ -41,7 +46,8 @@
from logilab.common.decorators import monkeypatch
-from cubicweb import AuthenticationError, ConfigurationError, CW_EVENT_MANAGER
+from cubicweb import (AuthenticationError, ConfigurationError,
+ CW_EVENT_MANAGER, CubicWebException)
from cubicweb.utils import json_dumps
from cubicweb.web import Redirect, DirectResponse, StatusResponse, LogOut
from cubicweb.web.application import CubicWebPublisher
@@ -70,13 +76,73 @@
code=http.FORBIDDEN,
stream='Access forbidden')
-class File(static.File):
- """Prevent from listing directories"""
+
+class NoListingFile(static.File):
def directoryListing(self):
return ForbiddenDirectoryLister()
-class LongTimeExpiringFile(File):
+class DataLookupDirectory(NoListingFile):
+ def __init__(self, config, path):
+ self.md5_version = config.instance_md5_version()
+ NoListingFile.__init__(self, path)
+ self.config = config
+ self.here = path
+ self._defineChildResources()
+ if self.config.debugmode:
+ self.data_modconcat_basepath = '/data/??'
+ else:
+ self.data_modconcat_basepath = '/data/%s/??' % self.md5_version
+
+ def _defineChildResources(self):
+ self.putChild(self.md5_version, self)
+
+ def getChild(self, path, request):
+ if not path:
+ uri = request.uri
+ if uri.startswith('/https/'):
+ uri = uri[6:]
+ if uri.startswith(self.data_modconcat_basepath):
+ resource_relpath = uri[len(self.data_modconcat_basepath):]
+ if resource_relpath:
+ paths = resource_relpath.split(',')
+ try:
+ return ConcatFiles(self.config, paths)
+ except ConcatFileNotFoundError:
+ return self.childNotFound
+ return self.directoryListing()
+ childpath = join(self.here, path)
+ dirpath, rid = self.config.locate_resource(childpath)
+ if dirpath is None:
+ # resource not found
+ return self.childNotFound
+ filepath = os.path.join(dirpath, rid)
+ if os.path.isdir(filepath):
+ resource = DataLookupDirectory(self.config, childpath)
+ # cache resource for this segment path to avoid recomputing
+ # directory lookup
+ self.putChild(path, resource)
+ return resource
+ else:
+ return NoListingFile(filepath)
+
+
+class FCKEditorResource(NoListingFile):
+ def __init__(self, config, path):
+ NoListingFile.__init__(self, path)
+ self.config = config
+
+ def getChild(self, path, request):
+ pre_path = request.path.split('/')[1:]
+ if pre_path[0] == 'https':
+ pre_path.pop(0)
+ uiprops = self.config.https_uiprops
+ else:
+ uiprops = self.config.uiprops
+ return static.File(osp.join(uiprops['FCKEDITOR_PATH'], path))
+
+
+class LongTimeExpiringFile(DataLookupDirectory):
"""overrides static.File and sets a far future ``Expires`` date
on the resource.
@@ -88,28 +154,77 @@
etc.
"""
+ def _defineChildResources(self):
+ pass
+
def render(self, request):
# XXX: Don't provide additional resource information to error responses
#
# the HTTP RFC recommends not going further than 1 year ahead
expires = date.today() + timedelta(days=6*30)
request.setHeader('Expires', generateDateTime(mktime(expires.timetuple())))
- return File.render(self, request)
+ return DataLookupDirectory.render(self, request)
+
+
+class ConcatFileNotFoundError(CubicWebException):
+ pass
+
+
+class ConcatFiles(LongTimeExpiringFile):
+ def __init__(self, config, paths):
+ _, ext = osp.splitext(paths[0])
+ # create a unique / predictable filename
+ fname = 'cache_concat_' + hashlib.md5(';'.join(paths)).hexdigest() + ext
+ filepath = osp.join(config.appdatahome, 'uicache', fname)
+ LongTimeExpiringFile.__init__(self, config, filepath)
+ self._concat_cached_filepath(filepath, paths)
+ def _concat_cached_filepath(self, filepath, paths):
+ if not self._up_to_date(filepath, paths):
+ concat_data = []
+ for path in paths:
+ # FIXME locate_resource is called twice in debug mode, but
+ # it's a @cached method
+ dirpath, rid = self.config.locate_resource(path)
+ if rid is None:
+ raise ConcatFileNotFoundError(path)
+ concat_data.append(open(osp.join(dirpath, rid)).read())
+ with open(filepath, 'wb') as f:
+ f.write('\n'.join(concat_data))
+
+ def _up_to_date(self, filepath, paths):
+ """
+ The concat-file is considered up-to-date if it exists.
+ In debug mode, an additional check is performed to make sure that the
+ concat-file is more recent than all of the concatenated files.
+ """
+ if not osp.isfile(filepath):
+ return False
+ if self.config.debugmode:
+ concat_lastmod = os.stat(filepath).st_mtime
+ for path in paths:
+ dirpath, rid = self.config.locate_resource(path)
+ if rid is None:
+ raise ConcatFileNotFoundError(path)
+ path = osp.join(dirpath, rid)
+ if os.stat(path).st_mtime > concat_lastmod:
+ return False
+ return True
class CubicWebRootResource(resource.Resource):
def __init__(self, config, vreg=None):
+ resource.Resource.__init__(self)
self.config = config
# instantiate publisher here and not in init_publisher to get some
# checks done before daemonization (eg versions consistency)
self.appli = CubicWebPublisher(config, vreg=vreg)
self.base_url = config['base-url']
self.https_url = config['https-url']
- self.children = {}
- self.static_directories = set(('data%s' % config.instance_md5_version(),
- 'data', 'static', 'fckeditor'))
global MAX_POST_LENGTH
MAX_POST_LENGTH = config['max-post-length']
+ self.putChild('static', NoListingFile(config.static_directory))
+ self.putChild('fckeditor', FCKEditorResource(self.config, ''))
+ self.putChild('data', DataLookupDirectory(self.config, ''))
def init_publisher(self):
config = self.config
@@ -152,38 +267,6 @@
def getChild(self, path, request):
"""Indicate which resource to use to process down the URL's path"""
- pre_path = request.path.split('/')[1:]
- if pre_path[0] == 'https':
- pre_path.pop(0)
- uiprops = self.config.https_uiprops
- else:
- uiprops = self.config.uiprops
- directory = pre_path[0]
- # Anything in data/, static/, fckeditor/ and the generated versioned
- # data directory is treated as static files
- if directory in self.static_directories:
- # take care fckeditor may appears as root directory or as a data
- # subdirectory
- if directory == 'static':
- return File(self.config.static_directory)
- if directory == 'fckeditor':
- return File(uiprops['FCKEDITOR_PATH'])
- if directory != 'data':
- # versioned directory, use specific file with http cache
- # headers so their are cached for a very long time
- cls = LongTimeExpiringFile
- else:
- cls = File
- if path == 'fckeditor':
- return cls(uiprops['FCKEDITOR_PATH'])
- if path == directory: # recurse
- return self
- datadir, path = self.config.locate_resource(path)
- if datadir is None:
- return self # recurse
- self.debug('static file %s from %s', path, datadir)
- return cls(join(datadir, path))
- # Otherwise we use this single resource
return self
def render(self, request):
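
(The ConcatFiles cache name is deterministic, so every request for the same
ordered list of resources maps to the same uicache file. Sketch:)

    import hashlib
    import os.path as osp

    paths = ['cubicweb.js', 'cubicweb.ajax.js']   # illustrative resources
    _, ext = osp.splitext(paths[0])
    fname = 'cache_concat_' + hashlib.md5(';'.join(paths)).hexdigest() + ext
    # -> 'cache_concat_<32 hex digits>.js', stable across requests/processes
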
--- a/hooks/__init__.py Wed Jun 08 15:11:45 2011 +0200
+++ b/hooks/__init__.py Wed Jun 08 17:08:00 2011 +0200
@@ -63,11 +63,9 @@
source.info('added %s entities', len(stats['created']))
if stats.get('updated'):
source.info('updated %s entities', len(stats['updated']))
- session.commit()
except Exception, exc:
session.exception('while trying to update feed %s', source)
- session.rollback()
- session.set_pool()
+ session.set_cnxset()
finally:
session.close()
self.repo.looping_task(60, update_feeds, self.repo)
--- a/hooks/metadata.py Wed Jun 08 15:11:45 2011 +0200
+++ b/hooks/metadata.py Wed Jun 08 17:08:00 2011 +0200
@@ -23,6 +23,7 @@
from cubicweb.selectors import is_instance
from cubicweb.server import hook
+from cubicweb.server.edition import EditedEntity
class MetaDataHook(hook.Hook):
@@ -68,8 +69,9 @@
def precommit_event(self):
session = self.session
relations = [(eid, session.user.eid) for eid in self.get_data()
- # don't consider entities that have been created and
- # deleted in the same transaction
+ # don't consider entities that have been created and deleted in
+ # the same transaction, nor ones where created_by has been
+ # explicitly set
if not session.deleted_in_transaction(eid) and \
not session.entity_from_eid(eid).created_by]
session.add_relations([('created_by', relations)])
@@ -141,3 +143,73 @@
session.repo.system_source.index_entity(
session, session.entity_from_eid(self.eidto))
+
+
+# entity source handling #######################################################
+
+class ChangeEntityUpdateCaches(hook.Operation):
+ def postcommit_event(self):
+ self.oldsource.reset_caches()
+ repo = self.session.repo
+ entity = self.entity
+ extid = entity.cw_metainformation()['extid']
+ repo._type_source_cache[entity.eid] = (
+ entity.__regid__, self.newsource.uri, None)
+ if self.oldsource.copy_based_source:
+ uri = 'system'
+ else:
+ uri = self.oldsource.uri
+ repo._extid_cache[(extid, uri)] = -entity.eid
+
+class ChangeEntitySourceDeleteHook(MetaDataHook):
+ """support for moving an entity from an external source by watching 'Any
+ cw_source CWSource' relation
+ """
+
+ __regid__ = 'cw.metadata.source-change'
+ __select__ = MetaDataHook.__select__ & hook.match_rtype('cw_source')
+ events = ('before_delete_relation',)
+
+ def __call__(self):
+ if (self._cw.deleted_in_transaction(self.eidfrom)
+ or self._cw.deleted_in_transaction(self.eidto)):
+ return
+ schange = self._cw.transaction_data.setdefault('cw_source_change', {})
+ schange[self.eidfrom] = self.eidto
+
+class ChangeEntitySourceAddHook(MetaDataHook):
+ __regid__ = 'cw.metadata.source-change'
+ __select__ = MetaDataHook.__select__ & hook.match_rtype('cw_source')
+ events = ('before_add_relation',)
+
+ def __call__(self):
+ schange = self._cw.transaction_data.get('cw_source_change')
+ if schange is not None and self.eidfrom in schange:
+ newsource = self._cw.entity_from_eid(self.eidto)
+ if newsource.name != 'system':
+ raise Exception('changing source to something else than the '
+ 'system source is unsupported')
+ syssource = newsource.repo_source
+ oldsource = self._cw.entity_from_eid(schange[self.eidfrom])
+ entity = self._cw.entity_from_eid(self.eidfrom)
+ # copy entity if necessary
+ if not oldsource.repo_source.copy_based_source:
+ entity.complete(skip_bytes=False)
+ entity.cw_edited = EditedEntity(entity, **entity.cw_attr_cache)
+ syssource.add_entity(self._cw, entity)
+ # we don't want the moved entity to be reimported later. To
+ # distinguish this state, the trick is to change the associated
+ # record in the 'entities' system table with eid=-eid while leaving
+ # other fields unchanged, and to add a new record with eid=eid,
+ # source='system'. External sources will then treat the case
+ # where `extid2eid` returns a negative eid as 'this entity was
+ # known but has been moved, ignore it'.
+ self._cw.system_sql('UPDATE entities SET eid=-eid,source=%(source)s WHERE eid=%(eid)s',
+ {'eid': self.eidfrom, 'source': newsource.name})
+ attrs = {'type': entity.__regid__, 'eid': entity.eid, 'extid': None,
+ 'source': 'system', 'mtime': datetime.now()}
+ self._cw.system_sql(syssource.sqlgen.insert('entities', attrs), attrs)
+ # register an operation to update repository/sources caches
+ ChangeEntityUpdateCaches(self._cw, entity=entity,
+ oldsource=oldsource.repo_source,
+ newsource=syssource)
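
(Consumer side of the eid=-eid convention, sketched; lookup_eid is a
hypothetical stand-in for whatever extid-to-eid lookup the external source
performs -- this hook only prepares the records:)

    eid = lookup_eid(extid)          # hypothetical: eid recorded in 'entities'
    if eid is not None and eid < 0:
        # imported once, then moved to the system source: known but gone,
        # so skip it instead of re-importing
        pass
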
--- a/hooks/syncschema.py Wed Jun 08 15:11:45 2011 +0200
+++ b/hooks/syncschema.py Wed Jun 08 17:08:00 2011 +0200
@@ -42,12 +42,15 @@
TYPE_CONVERTER = { # XXX
'Boolean': bool,
'Int': int,
+ 'BigInt': int,
'Float': float,
'Password': str,
'String': unicode,
'Date' : unicode,
'Datetime' : unicode,
'Time' : unicode,
+ 'TZDatetime' : unicode,
+ 'TZTime' : unicode,
}
# core entity and relation types which can't be removed
@@ -92,7 +95,7 @@
# create index before alter table, which may expectedly fail during test
# (sqlite) while index creation should never fail (test for index existence
# is done by the dbhelper)
- session.pool.source('system').create_index(session, table, column)
+ session.cnxset.source('system').create_index(session, table, column)
session.info('added index on %s(%s)', table, column)
@@ -252,7 +255,7 @@
description=entity.description)
eschema = schema.add_entity_type(etype)
# create the necessary table
- tablesql = y2sql.eschema2sql(session.pool.source('system').dbhelper,
+ tablesql = y2sql.eschema2sql(session.cnxset.source('system').dbhelper,
eschema, prefix=SQL_PREFIX)
for sql in tablesql.split(';'):
if sql.strip():
@@ -289,7 +292,7 @@
self.session.vreg.schema.rename_entity_type(oldname, newname)
# we need sql to operate physical changes on the system database
sqlexec = self.session.system_sql
- dbhelper= self.session.pool.source('system').dbhelper
+ dbhelper= self.session.cnxset.source('system').dbhelper
sql = dbhelper.sql_rename_table(SQL_PREFIX+oldname,
SQL_PREFIX+newname)
sqlexec(sql)
@@ -433,7 +436,7 @@
# update the in-memory schema first
rdefdef = self.init_rdef(**props)
# then make necessary changes to the system source database
- syssource = session.pool.source('system')
+ syssource = session.cnxset.source('system')
attrtype = y2sql.type_from_constraints(
syssource.dbhelper, rdefdef.object, rdefdef.constraints)
# XXX should be moved somehow into lgdb: sqlite doesn't support to
@@ -603,7 +606,7 @@
self.oldvalues = dict( (attr, getattr(rdef, attr)) for attr in self.values)
rdef.update(self.values)
# then make necessary changes to the system source database
- syssource = session.pool.source('system')
+ syssource = session.cnxset.source('system')
if 'indexed' in self.values:
syssource.update_rdef_indexed(session, rdef)
self.indexed_changed = True
@@ -621,7 +624,7 @@
# revert changes on in memory schema
self.rdef.update(self.oldvalues)
# revert changes on database
- syssource = self.session.pool.source('system')
+ syssource = self.session.cnxset.source('system')
if self.indexed_changed:
syssource.update_rdef_indexed(self.session, self.rdef)
if self.null_allowed_changed:
@@ -649,7 +652,7 @@
rdef.constraints.remove(self.oldcstr)
# then update database: alter the physical schema on size/unique
# constraint changes
- syssource = session.pool.source('system')
+ syssource = session.cnxset.source('system')
cstrtype = self.oldcstr.type()
if cstrtype == 'SizeConstraint':
syssource.update_rdef_column(session, rdef)
@@ -665,7 +668,7 @@
if self.oldcstr is not None:
self.rdef.constraints.append(self.oldcstr)
# revert changes on database
- syssource = self.session.pool.source('system')
+ syssource = self.session.cnxset.source('system')
if self.size_cstr_changed:
syssource.update_rdef_column(self.session, self.rdef)
if self.unique_changed:
@@ -696,7 +699,7 @@
rdef.constraints.append(newcstr)
# then update database: alter the physical schema on size/unique
# constraint changes
- syssource = session.pool.source('system')
+ syssource = session.cnxset.source('system')
if cstrtype == 'SizeConstraint' and (oldcstr is None or
oldcstr.max != newcstr.max):
syssource.update_rdef_column(session, rdef)
@@ -713,7 +716,7 @@
prefix = SQL_PREFIX
table = '%s%s' % (prefix, self.entity.constraint_of[0].name)
cols = ['%s%s' % (prefix, r.name) for r in self.entity.relations]
- dbhelper= session.pool.source('system').dbhelper
+ dbhelper= session.cnxset.source('system').dbhelper
sqls = dbhelper.sqls_create_multicol_unique_index(table, cols)
for sql in sqls:
session.system_sql(sql)
@@ -733,7 +736,7 @@
session = self.session
prefix = SQL_PREFIX
table = '%s%s' % (prefix, self.entity.type)
- dbhelper= session.pool.source('system').dbhelper
+ dbhelper= session.cnxset.source('system').dbhelper
cols = ['%s%s' % (prefix, c) for c in self.cols]
sqls = dbhelper.sqls_drop_multicol_unique_index(table, cols)
for sql in sqls:
@@ -782,7 +785,7 @@
"""
def precommit_event(self):
- """the observed connections pool has been commited"""
+ """the observed connections.cnxset has been commited"""
try:
erschema = self.session.vreg.schema.schema_by_eid(self.eid)
except KeyError:
@@ -811,7 +814,7 @@
"""
def precommit_event(self):
- """the observed connections pool has been commited"""
+ """the observed connections set has been commited"""
try:
erschema = self.session.vreg.schema.schema_by_eid(self.eid)
except KeyError:
@@ -1223,7 +1226,7 @@
source.fti_index_entities(session, [container])
if to_reindex:
# Transaction has already been committed
- session.pool.commit()
+ session.cnxset.commit()
--- a/hooks/syncsession.py Wed Jun 08 15:11:45 2011 +0200
+++ b/hooks/syncsession.py Wed Jun 08 17:08:00 2011 +0200
@@ -56,7 +56,7 @@
class _DeleteGroupOp(_GroupOperation):
"""synchronize user when a in_group relation has been deleted"""
def postcommit_event(self):
- """the observed connections pool has been commited"""
+ """the observed connections set has been commited"""
groups = self.cnxuser.groups
try:
groups.remove(self.group)
@@ -67,7 +67,7 @@
class _AddGroupOp(_GroupOperation):
"""synchronize user when a in_group relation has been added"""
def postcommit_event(self):
- """the observed connections pool has been commited"""
+ """the observed connections set has been commited"""
groups = self.cnxuser.groups
if self.group in groups:
self.warning('user %s already in group %s', self.cnxuser,
@@ -97,7 +97,7 @@
hook.Operation.__init__(self, session)
def postcommit_event(self):
- """the observed connections pool has been commited"""
+ """the observed connections set has been commited"""
try:
self.session.repo.close(self.cnxid)
except BadConnectionId:
@@ -122,7 +122,7 @@
"""a user's custom properties has been deleted"""
def postcommit_event(self):
- """the observed connections pool has been commited"""
+ """the observed connections set has been commited"""
try:
del self.cwpropdict[self.key]
except KeyError:
@@ -133,7 +133,7 @@
"""a user's custom properties has been added/changed"""
def postcommit_event(self):
- """the observed connections pool has been commited"""
+ """the observed connections set has been commited"""
self.cwpropdict[self.key] = self.value
@@ -141,7 +141,7 @@
"""a user's custom properties has been added/changed"""
def postcommit_event(self):
- """the observed connections pool has been commited"""
+ """the observed connections set has been commited"""
cwprop = self.cwprop
if not cwprop.for_user:
self.session.vreg['propertyvalues'][cwprop.pkey] = cwprop.value
--- a/hooks/syncsources.py Wed Jun 08 15:11:45 2011 +0200
+++ b/hooks/syncsources.py Wed Jun 08 17:08:00 2011 +0200
@@ -30,6 +30,8 @@
category = 'cw.sources'
+# repo sources synchronization #################################################
+
class SourceAddedOp(hook.Operation):
def postcommit_event(self):
self.session.repo.add_source(self.entity)
@@ -100,8 +102,10 @@
pass
-# source mapping synchronization. Expect cw_for_source/cw_schema are immutable
-# relations (i.e. can't change from a source or schema to another).
+# source mapping synchronization ###############################################
+#
+# Expect cw_for_source/cw_schema are immutable relations (i.e. can't change from
+# a source or schema to another).
class SourceMappingDeleteHook(SourceHook):
"""check cw_for_source and cw_schema are immutable relations
@@ -161,3 +165,4 @@
SourceMappingChangedOp.get_instance(self._cw).add_data(
(self._cw.entity_from_eid(self.eidfrom),
self._cw.entity_from_eid(self.eidto)) )
+
--- a/hooks/test/unittest_syncschema.py Wed Jun 08 15:11:45 2011 +0200
+++ b/hooks/test/unittest_syncschema.py Wed Jun 08 17:08:00 2011 +0200
@@ -36,9 +36,9 @@
self.__class__.schema_eids = schema_eids_idx(self.repo.schema)
def index_exists(self, etype, attr, unique=False):
- self.session.set_pool()
- dbhelper = self.session.pool.source('system').dbhelper
- sqlcursor = self.session.pool['system']
+ self.session.set_cnxset()
+ dbhelper = self.session.cnxset.source('system').dbhelper
+ sqlcursor = self.session.cnxset['system']
return dbhelper.index_exists(sqlcursor, SQL_PREFIX + etype, SQL_PREFIX + attr, unique=unique)
def _set_perms(self, eid):
@@ -57,9 +57,9 @@
def test_base(self):
schema = self.repo.schema
- self.session.set_pool()
- dbhelper = self.session.pool.source('system').dbhelper
- sqlcursor = self.session.pool['system']
+ self.session.set_cnxset()
+ dbhelper = self.session.cnxset.source('system').dbhelper
+ sqlcursor = self.session.cnxset['system']
self.failIf(schema.has_entity('Societe2'))
self.failIf(schema.has_entity('concerne2'))
# schema should be updated on insertion (after commit)
@@ -170,9 +170,9 @@
# schema modification hooks tests #########################################
def test_uninline_relation(self):
- self.session.set_pool()
- dbhelper = self.session.pool.source('system').dbhelper
- sqlcursor = self.session.pool['system']
+ self.session.set_cnxset()
+ dbhelper = self.session.cnxset.source('system').dbhelper
+ sqlcursor = self.session.cnxset['system']
self.failUnless(self.schema['state_of'].inlined)
try:
self.execute('SET X inlined FALSE WHERE X name "state_of"')
@@ -195,9 +195,9 @@
self.assertEqual(len(rset), 2)
def test_indexed_change(self):
- self.session.set_pool()
- dbhelper = self.session.pool.source('system').dbhelper
- sqlcursor = self.session.pool['system']
+ self.session.set_cnxset()
+ dbhelper = self.session.cnxset.source('system').dbhelper
+ sqlcursor = self.session.cnxset['system']
try:
self.execute('SET X indexed FALSE WHERE X relation_type R, R name "name"')
self.failUnless(self.schema['name'].rdef('Workflow', 'String').indexed)
@@ -214,9 +214,9 @@
self.failUnless(self.index_exists('Workflow', 'name'))
def test_unique_change(self):
- self.session.set_pool()
- dbhelper = self.session.pool.source('system').dbhelper
- sqlcursor = self.session.pool['system']
+ self.session.set_cnxset()
+ dbhelper = self.session.cnxset.source('system').dbhelper
+ sqlcursor = self.session.cnxset['system']
try:
self.execute('INSERT CWConstraint X: X cstrtype CT, DEF constrained_by X '
'WHERE CT name "UniqueConstraint", DEF relation_type RT, DEF from_entity E,'
--- a/hooks/workflow.py Wed Jun 08 15:11:45 2011 +0200
+++ b/hooks/workflow.py Wed Jun 08 17:08:00 2011 +0200
@@ -148,7 +148,7 @@
class WorkflowHook(hook.Hook):
__abstract__ = True
- category = 'workflow'
+ category = 'metadata'
class SetInitialStateHook(WorkflowHook):
@@ -160,21 +160,15 @@
_SetInitialStateOp(self._cw, entity=self.entity)
-class PrepareStateChangeHook(WorkflowHook):
- """record previous state information"""
- __regid__ = 'cwdelstate'
- __select__ = WorkflowHook.__select__ & hook.match_rtype('in_state')
- events = ('before_delete_relation',)
+class FireTransitionHook(WorkflowHook):
+ """check the transition is allowed and add missing information into the
+ TrInfo entity.
- def __call__(self):
- self._cw.transaction_data.setdefault('pendingrelations', []).append(
- (self.eidfrom, self.rtype, self.eidto))
-
-
-class FireTransitionHook(WorkflowHook):
- """check the transition is allowed, add missing information. Expect that:
+ Expect that:
* wf_info_for inlined relation is set
* by_transition or to_state (managers only) inlined relation is set
+
+ Also check for automatic transitions to be fired at the end
"""
__regid__ = 'wffiretransition'
__select__ = WorkflowHook.__select__ & is_instance('TrInfo')
@@ -273,7 +267,7 @@
class FiredTransitionHook(WorkflowHook):
- """change related entity state"""
+ """change related entity state and handle exit of subworkflow"""
__regid__ = 'wffiretransition'
__select__ = WorkflowHook.__select__ & is_instance('TrInfo')
events = ('after_add_entity',)
@@ -296,6 +290,7 @@
__regid__ = 'wfcheckinstate'
__select__ = WorkflowHook.__select__ & hook.match_rtype('in_state')
events = ('before_add_relation',)
+ category = 'integrity'
def __call__(self):
session = self._cw
--- a/i18n/de.po Wed Jun 08 15:11:45 2011 +0200
+++ b/i18n/de.po Wed Jun 08 17:08:00 2011 +0200
@@ -256,6 +256,12 @@
msgid "BaseTransition_plural"
msgstr "Übergänge (abstrakt)"
+msgid "BigInt"
+msgstr ""
+
+msgid "BigInt_plural"
+msgstr ""
+
msgid "Bookmark"
msgstr "Lesezeichen"
@@ -1849,6 +1855,9 @@
msgid "ctxtoolbar"
msgstr "Werkzeugleiste"
+msgid "currently in synchronization"
+msgstr ""
+
msgid "custom_workflow"
msgstr "angepasster Workflow"
@@ -2382,6 +2391,9 @@
msgid "external page"
msgstr "externe Seite"
+msgid "facet-loading-msg"
+msgstr ""
+
msgid "facet.filters"
msgstr ""
@@ -2566,9 +2578,6 @@
"generische Relation, die anzeigt, dass eine Entität mit einer anderen Web-"
"Ressource identisch ist (siehe http://www.w3.org/TR/owl-ref/#sameAs-def)."
-msgid "go back to the index page"
-msgstr "Zurück zur Index-Seite"
-
msgid "granted to groups"
msgstr "an Gruppen gewährt"
@@ -3178,6 +3187,12 @@
msgid "no associated permissions"
msgstr "keine entsprechende Berechtigung"
+msgid "no content next link"
+msgstr ""
+
+msgid "no content prev link"
+msgstr ""
+
#, python-format
msgid "no edited fields specified for entity %s"
msgstr "kein Eingabefeld spezifiziert Für Entität %s"
@@ -3888,6 +3903,13 @@
msgid "synchronization-interval must be greater than 1 minute"
msgstr ""
+msgid "synchronizing"
+msgstr ""
+
+msgctxt "CWSource"
+msgid "synchronizing"
+msgstr ""
+
msgid "table"
msgstr "Tabelle"
@@ -3926,6 +3948,12 @@
msgstr ""
"Der Wert \"%s\" wird bereits benutzt, bitte verwenden Sie einen anderen Wert"
+msgid "there is no next page"
+msgstr ""
+
+msgid "there is no previous page"
+msgstr ""
+
msgid "this action is not reversible!"
msgstr "Achtung! Diese Aktion ist unumkehrbar."
@@ -4025,8 +4053,8 @@
msgid "tr_count"
msgstr ""
-msgid "transaction undone"
-msgstr "Transaktion rückgängig gemacht"
+msgid "transaction undoed"
+msgstr ""
#, python-format
msgid "transition %(tr)s isn't allowed from %(st)s"
@@ -4310,30 +4338,30 @@
msgid "value"
msgstr "Wert"
+#, python-format
+msgid "value %(value)s must be %(op)s %(boundary)s"
+msgstr ""
+
+#, python-format
+msgid "value %(value)s must be <= %(boundary)s"
+msgstr ""
+
+#, python-format
+msgid "value %(value)s must be >= %(boundary)s"
+msgstr ""
+
msgid "value associated to this key is not editable manually"
msgstr ""
"Der mit diesem Schlüssele verbundene Wert kann n icht manuell geändert "
"werden."
#, python-format
-msgid "value must be %(op)s %(boundary)s"
-msgstr "Der Wert muss %(op)s %(boundary)s sein."
-
-#, python-format
-msgid "value must be <= %(boundary)s"
-msgstr "Der Wert muss <= %(boundary)s sein."
+msgid "value should have maximum size of %s but found %s"
+msgstr ""
#, python-format
-msgid "value must be >= %(boundary)s"
-msgstr "Der Wert muss >= %(boundary)s sein."
-
-#, python-format
-msgid "value should have maximum size of %s"
-msgstr "Der Wert darf höchstens %s betragen."
-
-#, python-format
-msgid "value should have minimum size of %s"
-msgstr "Der Wert muss mindestens %s betragen."
+msgid "value should have minimum size of %s but found %s"
+msgstr ""
msgid "vcard"
msgstr "VCard"
@@ -4479,76 +4507,3 @@
#, python-format
msgid "you should un-inline relation %s which is supported and may be crossed "
msgstr ""
-
-#~ msgid "Attributes with non default permissions:"
-#~ msgstr "Attribute mit nicht-standard-Berechtigungen"
-
-#~ msgid "Entity types"
-#~ msgstr "Entitätstypen"
-
-#~ msgid "Index"
-#~ msgstr "Index"
-
-#~ msgid "Permissions for entity types"
-#~ msgstr "Berechtigungen für Entitätstypen"
-
-#~ msgid "Permissions for relations"
-#~ msgstr "Berechtigungen für Relationen"
-
-#~ msgid "Relation types"
-#~ msgstr "Relationstypen"
-
-#~ msgid "am/pm calendar (month)"
-#~ msgstr "am/pm Kalender (Monat)"
-
-#~ msgid "am/pm calendar (semester)"
-#~ msgstr "am/pm Kalender (Halbjahr)"
-
-#~ msgid "am/pm calendar (week)"
-#~ msgstr "am/pm Kalender (Woche)"
-
-#~ msgid "am/pm calendar (year)"
-#~ msgstr "am/pm Kalender (Jahr)"
-
-#~ msgid "application entities"
-#~ msgstr "Anwendungs-Entitäten"
-
-#~ msgid "calendar (month)"
-#~ msgstr "Kalender (monatlich)"
-
-#~ msgid "calendar (semester)"
-#~ msgstr "Kalender (halbjährlich)"
-
-#~ msgid "calendar (week)"
-#~ msgstr "Kalender (wöchentlich)"
-
-#~ msgid "calendar (year)"
-#~ msgstr "Kalender (jährlich)"
-
-#~ msgid ""
-#~ "can't set inlined=%(inlined)s, %(stype)s %(rtype)s %(otype)s has "
-#~ "cardinality=%(card)s"
-#~ msgstr ""
-#~ "Kann 'inlined' = %(inlined)s nicht zuweisen, %(stype)s %(rtype)s %(otype)"
-#~ "s hat die Kardinalität %(card)s"
-
-#~ msgid "create an index page"
-#~ msgstr "Eine Index-Seite anlegen"
-
-#~ msgid "edit the index page"
-#~ msgstr "Index-Seite bearbeiten"
-
-#~ msgid "schema entities"
-#~ msgstr "Entitäten, die das Schema definieren"
-
-#~ msgid "schema-security"
-#~ msgstr "Rechte"
-
-#~ msgid "system entities"
-#~ msgstr "System-Entitäten"
-
-#~ msgid "timestamp of the latest source synchronization."
-#~ msgstr "Zeitstempel der letzten Synchronisierung mit der Quelle."
-
-#~ msgid "up"
-#~ msgstr "nach oben"
--- a/i18n/en.po Wed Jun 08 15:11:45 2011 +0200
+++ b/i18n/en.po Wed Jun 08 17:08:00 2011 +0200
@@ -5,7 +5,7 @@
msgstr ""
"Project-Id-Version: 2.0\n"
"POT-Creation-Date: 2006-01-12 17:35+CET\n"
-"PO-Revision-Date: 2010-09-15 14:55+0200\n"
+"PO-Revision-Date: 2011-04-29 12:57+0200\n"
"Last-Translator: Sylvain Thenault <sylvain.thenault@logilab.fr>\n"
"Language-Team: English <devel@logilab.fr.org>\n"
"Language: en\n"
@@ -245,6 +245,12 @@
msgid "BaseTransition_plural"
msgstr "Transitions (abstract)"
+msgid "BigInt"
+msgstr "Big integer"
+
+msgid "BigInt_plural"
+msgstr "Big integers"
+
msgid "Bookmark"
msgstr "Bookmark"
@@ -503,7 +509,7 @@
msgstr "Interval"
msgid "IntervalBoundConstraint"
-msgstr "interval constraint"
+msgstr "Interval constraint"
msgid "Interval_plural"
msgstr "Intervals"
@@ -1804,6 +1810,9 @@
msgid "ctxtoolbar"
msgstr "toolbar"
+msgid "currently in synchronization"
+msgstr ""
+
msgid "custom_workflow"
msgstr "custom workflow"
@@ -2324,6 +2333,9 @@
msgid "external page"
msgstr ""
+msgid "facet-loading-msg"
+msgstr "processing, please wait"
+
msgid "facet.filters"
msgstr "filter"
@@ -2506,9 +2518,6 @@
"object as a local one: http://www.w3.org/TR/owl-ref/#sameAs-def"
msgstr ""
-msgid "go back to the index page"
-msgstr ""
-
msgid "granted to groups"
msgstr ""
@@ -3089,6 +3098,12 @@
msgid "no associated permissions"
msgstr ""
+msgid "no content next link"
+msgstr ""
+
+msgid "no content prev link"
+msgstr ""
+
#, python-format
msgid "no edited fields specified for entity %s"
msgstr ""
@@ -3784,6 +3799,13 @@
msgid "synchronization-interval must be greater than 1 minute"
msgstr ""
+msgid "synchronizing"
+msgstr ""
+
+msgctxt "CWSource"
+msgid "synchronizing"
+msgstr ""
+
msgid "table"
msgstr ""
@@ -3821,6 +3843,12 @@
msgid "the value \"%s\" is already used, use another one"
msgstr ""
+msgid "there is no next page"
+msgstr ""
+
+msgid "there is no previous page"
+msgstr ""
+
msgid "this action is not reversible!"
msgstr ""
@@ -3920,7 +3948,7 @@
msgid "tr_count"
msgstr "transition number"
-msgid "transaction undone"
+msgid "transaction undoed"
msgstr ""
#, python-format
@@ -4196,27 +4224,27 @@
msgid "value"
msgstr ""
+#, python-format
+msgid "value %(value)s must be %(op)s %(boundary)s"
+msgstr ""
+
+#, python-format
+msgid "value %(value)s must be <= %(boundary)s"
+msgstr ""
+
+#, python-format
+msgid "value %(value)s must be >= %(boundary)s"
+msgstr ""
+
msgid "value associated to this key is not editable manually"
msgstr ""
#, python-format
-msgid "value must be %(op)s %(boundary)s"
-msgstr ""
-
-#, python-format
-msgid "value must be <= %(boundary)s"
+msgid "value should have maximum size of %s but found %s"
msgstr ""
#, python-format
-msgid "value must be >= %(boundary)s"
-msgstr ""
-
-#, python-format
-msgid "value should have maximum size of %s"
-msgstr ""
-
-#, python-format
-msgid "value should have minimum size of %s"
+msgid "value should have minimum size of %s but found %s"
msgstr ""
msgid "vcard"
@@ -4361,10 +4389,3 @@
#, python-format
msgid "you should un-inline relation %s which is supported and may be crossed "
msgstr ""
-
-#~ msgctxt "CWAttribute"
-#~ msgid "relations_object"
-#~ msgstr "constrained by"
-
-#~ msgid "schema-security"
-#~ msgstr "permissions"
--- a/i18n/es.po Wed Jun 08 15:11:45 2011 +0200
+++ b/i18n/es.po Wed Jun 08 17:08:00 2011 +0200
@@ -257,6 +257,12 @@
msgid "BaseTransition_plural"
msgstr "Transiciones (abstractas)"
+msgid "BigInt"
+msgstr ""
+
+msgid "BigInt_plural"
+msgstr ""
+
msgid "Bookmark"
msgstr "Favorito"
@@ -1878,6 +1884,9 @@
msgid "ctxtoolbar"
msgstr "Barra de herramientas"
+msgid "currently in synchronization"
+msgstr ""
+
msgid "custom_workflow"
msgstr "Workflow especÃfico"
@@ -2425,6 +2434,9 @@
msgid "external page"
msgstr "Página externa"
+msgid "facet-loading-msg"
+msgstr ""
+
msgid "facet.filters"
msgstr "Filtros"
@@ -2609,9 +2621,6 @@
"Relación genérica que indicar que una entidad es idéntica a otro recurso web "
"(ver http://www.w3.org/TR/owl-ref/#sameAs-def)."
-msgid "go back to the index page"
-msgstr "Regresar a la página de inicio"
-
msgid "granted to groups"
msgstr "Otorgado a los grupos"
@@ -3146,11 +3155,11 @@
msgctxt "CWSource"
msgid "name"
-msgstr "nombre"
+msgstr ""
msgctxt "State"
msgid "name"
-msgstr "Nombre"
+msgstr "nombre"
msgctxt "Transition"
msgid "name"
@@ -3219,6 +3228,12 @@
msgid "no associated permissions"
msgstr "No existe permiso asociado"
+msgid "no content next link"
+msgstr ""
+
+msgid "no content prev link"
+msgstr ""
+
#, python-format
msgid "no edited fields specified for entity %s"
msgstr "Ningún campo editable especificado para la entidad %s"
@@ -3938,6 +3953,13 @@
msgid "synchronization-interval must be greater than 1 minute"
msgstr "synchronization-interval debe ser mayor a 1 minuto"
+msgid "synchronizing"
+msgstr ""
+
+msgctxt "CWSource"
+msgid "synchronizing"
+msgstr ""
+
msgid "table"
msgstr "Tabla"
@@ -3976,6 +3998,12 @@
msgid "the value \"%s\" is already used, use another one"
msgstr "El valor \"%s\" ya esta en uso, favor de utilizar otro"
+msgid "there is no next page"
+msgstr ""
+
+msgid "there is no previous page"
+msgstr ""
+
msgid "this action is not reversible!"
msgstr "Esta acción es irreversible!."
@@ -4075,8 +4103,8 @@
msgid "tr_count"
msgstr "n° de transición"
-msgid "transaction undone"
-msgstr "Transacciones Anuladas"
+msgid "transaction undoed"
+msgstr ""
#, python-format
msgid "transition %(tr)s isn't allowed from %(st)s"
@@ -4360,28 +4388,28 @@
msgid "value"
msgstr "Vampr"
+#, python-format
+msgid "value %(value)s must be %(op)s %(boundary)s"
+msgstr ""
+
+#, python-format
+msgid "value %(value)s must be <= %(boundary)s"
+msgstr ""
+
+#, python-format
+msgid "value %(value)s must be >= %(boundary)s"
+msgstr ""
+
msgid "value associated to this key is not editable manually"
msgstr "El valor asociado a este elemento no es editable manualmente"
#, python-format
-msgid "value must be %(op)s %(boundary)s"
-msgstr "El valor debe ser %(op)s %(boundary)s"
-
-#, python-format
-msgid "value must be <= %(boundary)s"
-msgstr "El valor debe ser <= %(boundary)s"
+msgid "value should have maximum size of %s but found %s"
+msgstr ""
#, python-format
-msgid "value must be >= %(boundary)s"
-msgstr "El valor debe ser >= %(boundary)s"
-
-#, python-format
-msgid "value should have maximum size of %s"
-msgstr "El valor no debe exceder de %s"
-
-#, python-format
-msgid "value should have minimum size of %s"
-msgstr "El valor no puede ser menor a %s"
+msgid "value should have minimum size of %s but found %s"
+msgstr ""
msgid "vcard"
msgstr "vcard"
@@ -4530,6 +4558,3 @@
msgstr ""
"usted debe quitar la puesta en lÃnea de la relación %s que es aceptada y "
"puede ser cruzada"
-
-#~ msgid "add a %s"
-#~ msgstr "agregar un %s"
--- a/i18n/fr.po Wed Jun 08 15:11:45 2011 +0200
+++ b/i18n/fr.po Wed Jun 08 17:08:00 2011 +0200
@@ -4,7 +4,7 @@
msgid ""
msgstr ""
"Project-Id-Version: cubicweb 2.46.0\n"
-"PO-Revision-Date: 2011-01-03 14:35+0100\n"
+"PO-Revision-Date: 2011-04-29 12:57+0200\n"
"Last-Translator: Logilab Team <contact@logilab.fr>\n"
"Language-Team: fr <contact@logilab.fr>\n"
"Language: \n"
@@ -255,6 +255,12 @@
msgid "BaseTransition_plural"
msgstr "Transitions (abstraites)"
+msgid "BigInt"
+msgstr "Entier long"
+
+msgid "BigInt_plural"
+msgstr "Entiers longs"
+
msgid "Bookmark"
msgstr "Signet"
@@ -1881,6 +1887,9 @@
msgid "ctxtoolbar"
msgstr "barre d'outils"
+msgid "currently in synchronization"
+msgstr "en cours de synchronisation"
+
msgid "custom_workflow"
msgstr "workflow spécifique"
@@ -2423,6 +2432,9 @@
msgid "external page"
msgstr "page externe"
+msgid "facet-loading-msg"
+msgstr "en cours de traitement, merci de patienter"
+
msgid "facet.filters"
msgstr "facettes"
@@ -2607,9 +2619,6 @@
"relation générique permettant d'indiquer qu'une entité est identique à une "
"autre ressource web (voir http://www.w3.org/TR/owl-ref/#sameAs-def)."
-msgid "go back to the index page"
-msgstr "retourner sur la page d'accueil"
-
msgid "granted to groups"
msgstr "accordée aux groupes"
@@ -3218,6 +3227,12 @@
msgid "no associated permissions"
msgstr "aucune permission associée"
+msgid "no content next link"
+msgstr ""
+
+msgid "no content prev link"
+msgstr ""
+
#, python-format
msgid "no edited fields specified for entity %s"
msgstr "aucun champ à éditer spécifié pour l'entité %s"
@@ -3939,6 +3954,13 @@
msgid "synchronization-interval must be greater than 1 minute"
msgstr "synchronization-interval doit être supérieur à 1 minute"
+msgid "synchronizing"
+msgstr "synchronisation"
+
+msgctxt "CWSource"
+msgid "synchronizing"
+msgstr "synchronisation"
+
msgid "table"
msgstr "table"
@@ -3976,6 +3998,12 @@
msgid "the value \"%s\" is already used, use another one"
msgstr "la valeur \"%s\" est déjà utilisée, veuillez utiliser une autre valeur"
+msgid "there is no next page"
+msgstr ""
+
+msgid "there is no previous page"
+msgstr ""
+
msgid "this action is not reversible!"
msgstr ""
"Attention ! Cette opération va détruire les données de façon irréversible."
@@ -4076,8 +4104,8 @@
msgid "tr_count"
msgstr "n° de transition"
-msgid "transaction undone"
-msgstr "transaction annulées"
+msgid "transaction undoed"
+msgstr "transaction annulée"
#, python-format
msgid "transition %(tr)s isn't allowed from %(st)s"
@@ -4359,28 +4387,28 @@
msgid "value"
msgstr "valeur"
+#, python-format
+msgid "value %(value)s must be %(op)s %(boundary)s"
+msgstr "la valeur %(value)s doit être %(op)s %(boundary)s"
+
+#, python-format
+msgid "value %(value)s must be <= %(boundary)s"
+msgstr "la valeur %(value)s doit être <= %(boundary)s"
+
+#, python-format
+msgid "value %(value)s must be >= %(boundary)s"
+msgstr "la valeur %(value)s doit être >= %(boundary)s"
+
msgid "value associated to this key is not editable manually"
msgstr "la valeur associée à cette clé n'est pas éditable manuellement"
#, python-format
-msgid "value must be %(op)s %(boundary)s"
-msgstr "la valeur doit être %(op)s %(boundary)s"
-
-#, python-format
-msgid "value must be <= %(boundary)s"
-msgstr "la valeur doit être <= %(boundary)s"
+msgid "value should have maximum size of %s but found %s"
+msgstr "la taille maximum est %s mais cette valeur est de taille %s"
#, python-format
-msgid "value must be >= %(boundary)s"
-msgstr "la valeur doit être >= %(boundary)s"
-
-#, python-format
-msgid "value should have maximum size of %s"
-msgstr "la valeur doit être de taille %s au maximum"
-
-#, python-format
-msgid "value should have minimum size of %s"
-msgstr "la valeur doit être de taille %s au minimum"
+msgid "value should have minimum size of %s but found %s"
+msgstr "la taille minimum est %s mais cette valeur est de taille %s"
msgid "vcard"
msgstr "vcard"
@@ -4530,66 +4558,3 @@
msgstr ""
"vous devriez enlevé la mise en ligne de la relation %s qui est supportée et "
"peut-être croisée"
-
-#~ msgid "Attributes with non default permissions:"
-#~ msgstr "Attributs ayant des permissions non-standard"
-
-#~ msgid "Entity types"
-#~ msgstr "Types d'entités"
-
-#~ msgid "Permissions for entity types"
-#~ msgstr "Permissions pour les types d'entités"
-
-#~ msgid "Permissions for relations"
-#~ msgstr "Permissions pour les relations"
-
-#~ msgid "Relation types"
-#~ msgstr "Types de relation"
-
-#~ msgid "add a %s"
-#~ msgstr "ajouter un %s"
-
-#~ msgid "am/pm calendar (month)"
-#~ msgstr "calendrier am/pm (mois)"
-
-#~ msgid "am/pm calendar (semester)"
-#~ msgstr "calendrier am/pm (semestre)"
-
-#~ msgid "am/pm calendar (week)"
-#~ msgstr "calendrier am/pm (semaine)"
-
-#~ msgid "am/pm calendar (year)"
-#~ msgstr "calendrier am/pm (année)"
-
-#~ msgid "application entities"
-#~ msgstr "entités applicatives"
-
-#~ msgid "calendar (month)"
-#~ msgstr "calendrier (mensuel)"
-
-#~ msgid "calendar (semester)"
-#~ msgstr "calendrier (semestriel)"
-
-#~ msgid "calendar (week)"
-#~ msgstr "calendrier (hebdo)"
-
-#~ msgid "calendar (year)"
-#~ msgstr "calendrier (annuel)"
-
-#~ msgid "create an index page"
-#~ msgstr "créer une page d'accueil"
-
-#~ msgid "edit the index page"
-#~ msgstr "éditer la page d'accueil"
-
-#~ msgid "schema entities"
-#~ msgstr "entités définissant le schéma"
-
-#~ msgid "schema-security"
-#~ msgstr "permissions"
-
-#~ msgid "system entities"
-#~ msgstr "entités systèmes"
-
-#~ msgid "timestamp of the latest source synchronization."
-#~ msgstr "date de la dernière synchronisation avec la source."
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/misc/migration/3.13.0_Any.py Wed Jun 08 17:08:00 2011 +0200
@@ -0,0 +1,3 @@
+sync_schema_props_perms('cw_source', syncprops=False)
+add_attribute('CWSource', 'synchronizing')
+add_entity_type('BigInt')
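
Once this migration has run, `BigInt` is available as a final entity type for
cube schemas. A minimal sketch of how an attribute of that type could be
declared, assuming the matching yams release exports `BigInt` from
`yams.buildobjs` (the `Counter` entity type is made up for illustration):

    from yams.buildobjs import EntityType, BigInt

    class Counter(EntityType):
        # hypothetical entity type holding a 64-bit integer value
        value = BigInt(required=True, default=0)
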
--- a/misc/migration/bootstrapmigration_repository.py Wed Jun 08 15:11:45 2011 +0200
+++ b/misc/migration/bootstrapmigration_repository.py Wed Jun 08 17:08:00 2011 +0200
@@ -49,7 +49,7 @@
elif applcubicwebversion < (3, 6, 0) and cubicwebversion >= (3, 6, 0):
CSTRMAP = dict(rql('Any T, X WHERE X is CWConstraintType, X name T',
ask_confirm=False))
- session.set_pool()
+ session.set_cnxset()
permsdict = ss.deserialize_ertype_permissions(session)
with hooks_control(session, session.HOOKS_ALLOW_ALL, 'integrity'):
--- a/misc/scripts/drop_external_entities.py Wed Jun 08 15:11:45 2011 +0200
+++ b/misc/scripts/drop_external_entities.py Wed Jun 08 17:08:00 2011 +0200
@@ -3,7 +3,7 @@
sql("DELETE FROM entities WHERE type='Int'")
-ecnx = session.pool.connection(source)
+ecnx = session.cnxset.connection(source)
for e in rql('Any X WHERE X cw_source S, S name %(name)s', {'name': source}).entities():
meta = e.cw_metainformation()
assert meta['source']['uri'] == source
--- a/rset.py Wed Jun 08 15:11:45 2011 +0200
+++ b/rset.py Wed Jun 08 17:08:00 2011 +0200
@@ -475,43 +475,57 @@
entity.eid = eid
# cache entity
req.set_entity_cache(entity)
- eschema = entity.e_schema
# try to complete the entity if there are some additional columns
if len(rowvalues) > 1:
- rqlst = self.syntax_tree()
- if rqlst.TYPE == 'select':
- # UNION query, find the subquery from which this entity has been
- # found
- select, col = rqlst.locate_subquery(col, etype, self.args)
+ eschema = entity.e_schema
+ eid_col, attr_cols, rel_cols = self._rset_structure(eschema, col)
+ entity.eid = rowvalues[eid_col]
+ for attr, col_idx in attr_cols.items():
+ entity.cw_attr_cache[attr] = rowvalues[col_idx]
+ for (rtype, role), col_idx in rel_cols.items():
+ value = rowvalues[col_idx]
+ if value is None:
+ if role == 'subject':
+ rql = 'Any Y WHERE X %s Y, X eid %s'
+ else:
+ rql = 'Any Y WHERE Y %s X, X eid %s'
+ rrset = ResultSet([], rql % (rtype, entity.eid))
+ rrset.req = req
+ else:
+ rrset = self._build_entity(row, col_idx).as_rset()
+ entity.cw_set_relation_cache(rtype, role, rrset)
+ return entity
+
+ @cached
+ def _rset_structure(self, eschema, entity_col):
+ eid_col = col = entity_col
+ rqlst = self.syntax_tree()
+ attr_cols = {}
+ rel_cols = {}
+ if rqlst.TYPE == 'select':
+ # UNION query, find the subquery from which this entity has been
+ # found
+ select, col = rqlst.locate_subquery(entity_col, eschema.type, self.args)
+ else:
+ select = rqlst
+ # take care, due to outer join support, we may find None
+ # values for non final relation
+ for i, attr, role in attr_desc_iterator(select, col, entity_col):
+ if role == 'subject':
+ rschema = eschema.subjrels[attr]
else:
- select = rqlst
- # take care, due to outer join support, we may find None
- # values for non final relation
- for i, attr, role in attr_desc_iterator(select, col, entity.cw_col):
- if role == 'subject':
- rschema = eschema.subjrels[attr]
- if rschema.final:
- if attr == 'eid':
- entity.eid = rowvalues[i]
- else:
- entity.cw_attr_cache[attr] = rowvalues[i]
- continue
+ rschema = eschema.objrels[attr]
+ if rschema.final:
+ if attr == 'eid':
+ eid_col = i
else:
- rschema = eschema.objrels[attr]
+ attr_cols[attr] = i
+ else:
rdef = eschema.rdef(attr, role)
# only keep value if it can't be multivalued
if rdef.role_cardinality(role) in '1?':
- if rowvalues[i] is None:
- if role == 'subject':
- rql = 'Any Y WHERE X %s Y, X eid %s'
- else:
- rql = 'Any Y WHERE Y %s X, X eid %s'
- rrset = ResultSet([], rql % (attr, entity.eid))
- rrset.req = req
- else:
- rrset = self._build_entity(row, i).as_rset()
- entity.cw_set_relation_cache(attr, role, rrset)
- return entity
+ rel_cols[(attr, role)] = i
+ return eid_col, attr_cols, rel_cols
@cached
def syntax_tree(self):
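
The refactoring above moves the row-independent work (locating the eid,
attribute and relation columns) into `_rset_structure`, which `@cached`
evaluates only once per result set instead of once per row. A minimal sketch
of that memoization pattern, with hypothetical names, assuming a
per-instance, per-arguments cache in the spirit of logilab.common's `cached`
decorator:

    def cached(func):
        # cache a method's result per instance and per positional arguments
        def wrapper(self, *args):
            cache = self.__dict__.setdefault('_cache_%s' % func.__name__, {})
            if args not in cache:
                cache[args] = func(self, *args)
            return cache[args]
        return wrapper

    class FakeRset(object):
        @cached
        def structure(self, entity_col):
            print 'computed once for column %s' % entity_col
            return {'eid_col': entity_col}  # row-independent layout
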
--- a/schema.py Wed Jun 08 15:11:45 2011 +0200
+++ b/schema.py Wed Jun 08 17:08:00 2011 +0200
@@ -544,10 +544,11 @@
rschema = self.add_relation_type(ybo.RelationType('identity'))
rschema.final = False
+ etype_name_re = r'[A-Z][A-Za-z0-9]*[a-z]+[A-Za-z0-9]*$'
def add_entity_type(self, edef):
edef.name = edef.name.encode()
edef.name = bw_normalize_etype(edef.name)
- if not re.match(r'[A-Z][A-Za-z0-9]*[a-z]+[0-9]*$', edef.name):
+ if not re.match(self.etype_name_re, edef.name):
raise BadSchemaDefinition(
'%r is not a valid name for an entity type. It should start '
'with an upper cased letter and be followed by at least a '
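
The relaxed expression keeps the two hard requirements (a leading upper cased
letter and at least one lower cased letter) but now also accepts names ending
with upper cased letters or digits. A quick demonstration, with made-up type
names:

    import re

    OLD_RE = r'[A-Z][A-Za-z0-9]*[a-z]+[0-9]*$'
    NEW_RE = r'[A-Z][A-Za-z0-9]*[a-z]+[A-Za-z0-9]*$'

    for name in ('CWUser', 'BigInt', 'Plan2D', 'XYZ', 'lowercase'):
        print '%-9s old:%-5s new:%s' % (name, bool(re.match(OLD_RE, name)),
                                        bool(re.match(NEW_RE, name)))
    # CWUser    old:True  new:True
    # BigInt    old:True  new:True
    # Plan2D    old:False new:True   (now valid)
    # XYZ       old:False new:False  (no lower cased letter)
    # lowercase old:False new:False  (no leading upper cased letter)
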
--- a/schemas/base.py Wed Jun 08 15:11:45 2011 +0200
+++ b/schemas/base.py Wed Jun 08 17:08:00 2011 +0200
@@ -21,7 +21,8 @@
_ = unicode
from yams.buildobjs import (EntityType, RelationType, RelationDefinition,
- SubjectRelation, String, Datetime, Password, Interval)
+ SubjectRelation,
+ String, Datetime, Password, Interval, Boolean)
from cubicweb.schema import (
RQLConstraint, WorkflowableEntityType, ERQLExpression, RRQLExpression,
PUB_SYSTEM_ENTITY_PERMS, PUB_SYSTEM_REL_PERMS, PUB_SYSTEM_ATTR_PERMS)
@@ -265,7 +266,8 @@
url = String(description=_('URLs from which content will be imported. You can put one url per line'))
parser = String(description=_('parser to use to extract entities from content retrieved at given URLs.'))
latest_retrieval = Datetime(description=_('latest synchronization time'))
-
+ synchronizing = Boolean(description=_('currently in synchronization'),
+ default=False)
ENTITY_MANAGERS_PERMISSIONS = {
'read': ('managers',),
@@ -307,8 +309,8 @@
class cw_source(RelationDefinition):
__permissions__ = {
'read': ('managers', 'users', 'guests'),
- 'add': (),
- 'delete': (),
+ 'add': ('managers',),
+ 'delete': ('managers',),
}
subject = '*'
object = 'CWSource'
--- a/server/__init__.py Wed Jun 08 15:11:45 2011 +0200
+++ b/server/__init__.py Wed Jun 08 17:08:00 2011 +0200
@@ -230,7 +230,7 @@
for path in reversed(paths):
mhandler.exec_event_script('pre%s' % event, path)
# enter instance'schema into the database
- session.set_pool()
+ session.set_cnxset()
serialize_schema(session, schema)
# execute cubicweb's post<event> script
mhandler.exec_event_script('post%s' % event)
--- a/server/checkintegrity.py Wed Jun 08 15:11:45 2011 +0200
+++ b/server/checkintegrity.py Wed Jun 08 17:08:00 2011 +0200
@@ -101,7 +101,7 @@
# deactivate modification_date hook since we don't want them
# to be updated due to the reindexation
repo = session.repo
- cursor = session.pool['system']
+ cursor = session.cnxset['system']
dbhelper = session.repo.system_source.dbhelper
if not dbhelper.has_fti_table(cursor):
print 'no text index table'
@@ -356,7 +356,7 @@
using given user and password to locally connect to the repository
(no running cubicweb server needed)
"""
- session = repo._get_session(cnx.sessionid, setpool=True)
+ session = repo._get_session(cnx.sessionid, setcnxset=True)
# yo, launch checks
if checks:
eids_cache = {}
@@ -372,6 +372,6 @@
print 'WARNING: Diagnostic run, nothing has been corrected'
if reindex:
cnx.rollback()
- session.set_pool()
+ session.set_cnxset()
reindex_entities(repo.schema, session, withpb=withpb)
cnx.commit()
--- a/server/edition.py Wed Jun 08 15:11:45 2011 +0200
+++ b/server/edition.py Wed Jun 08 17:08:00 2011 +0200
@@ -68,6 +68,11 @@
super(EditedEntity, self).__delitem__(attr)
self.entity.cw_attr_cache.pop(attr, None)
+ def __copy__(self):
+ # default copy protocol fails in EditedEntity.__setitem__ because
+ # copied entity has no skip_security attribute at this point
+ return EditedEntity(self.entity, **self)
+
def pop(self, attr, *args):
# don't update skip_security by design (think to storage api)
assert not self.saved, 'too late to modify edited attributes'
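
For context on the `__copy__` hook added above: `copy.copy` looks for a
`__copy__` method on the class before falling back to the generic reduce
protocol, so defining it lets the instance be rebuilt through its own
constructor, where `skip_security` and friends are initialized. A toy
standalone sketch of the protocol (class and attribute names hypothetical):

    import copy

    class Edited(dict):
        def __init__(self, entity, **kwargs):
            dict.__init__(self, **kwargs)
            self.entity = entity
            self.skip_security = set()

        def __copy__(self):
            # picked up by copy.copy, rebuilds the instance via __init__
            return Edited(self.entity, **self)

    e = Edited('fake entity', name=u'babar')
    e2 = copy.copy(e)
    assert e2 == e and e2.entity is e.entity
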
--- a/server/hook.py Wed Jun 08 15:11:45 2011 +0200
+++ b/server/hook.py Wed Jun 08 17:08:00 2011 +0200
@@ -248,7 +248,7 @@
from logging import getLogger
from itertools import chain
-from logilab.common.decorators import classproperty
+from logilab.common.decorators import classproperty, cached
from logilab.common.deprecation import deprecated, class_renamed
from logilab.common.logging_ext import set_log_methods
@@ -257,7 +257,7 @@
from cubicweb.cwvreg import CWRegistry, VRegistry
from cubicweb.selectors import (objectify_selector, lltrace, ExpectedValueSelector,
is_instance)
-from cubicweb.appobject import AppObject
+from cubicweb.appobject import AppObject, NotSelector, OrSelector
from cubicweb.server.session import security_enabled
ENTITIES_HOOKS = set(('before_add_entity', 'after_add_entity',
@@ -318,15 +318,83 @@
else:
entities = []
eids_from_to = []
+ pruned = self.get_pruned_hooks(session, event,
+ entities, eids_from_to, kwargs)
# by default, hooks are executed with security turned off
with security_enabled(session, read=False):
for _kwargs in _iter_kwargs(entities, eids_from_to, kwargs):
- hooks = sorted(self.possible_objects(session, **_kwargs),
+ hooks = sorted(self.filtered_possible_objects(pruned, session, **_kwargs),
key=lambda x: x.order)
with security_enabled(session, write=False):
for hook in hooks:
- #print hook.category, hook.__regid__
- hook()
+ hook()
+
+ def get_pruned_hooks(self, session, event, entities, eids_from_to, kwargs):
+ """return a set of hooks that should not be considered by filtered_possible objects
+
+ the idea is to make a first pass over all the hooks in the
+ registry and to mark put some of them in a pruned list. The
+ pruned hooks are the one which:
+
+ * are disabled at the session level
+ * have a match_rtype or an is_instance selector which does not
+ match the rtype / etype of the relations / entities for
+ which we are calling the hooks. This works because the
+ repository calls the hooks grouped by rtype or by etype when
+ using the entities or eids_to_from keyword arguments
+
+ Only hooks with a simple selector or an AndSelector of simple
+ selectors are considered for disabling.
+
+ """
+ if 'entity' in kwargs:
+ entities = [kwargs['entity']]
+ if len(entities):
+ look_for_selector = is_instance
+ etype = entities[0].__regid__
+ elif 'rtype' in kwargs:
+ look_for_selector = match_rtype
+ etype = None
+        else: # nothing to prune, we should not get here
+ return set()
+ cache_key = (event, kwargs.get('rtype'), etype)
+ pruned = session.pruned_hooks_cache.get(cache_key)
+ if pruned is not None:
+ return pruned
+ pruned = set()
+ session.pruned_hooks_cache[cache_key] = pruned
+ if look_for_selector is not None:
+ for id, hooks in self.iteritems():
+ for hook in hooks:
+ enabled_cat, main_filter = hook.filterable_selectors()
+ if enabled_cat is not None:
+ if not enabled_cat(hook, session):
+ pruned.add(hook)
+ continue
+ if main_filter is not None:
+ if isinstance(main_filter, match_rtype) and \
+ (main_filter.frometypes is not None or \
+ main_filter.toetypes is not None):
+ continue
+ first_kwargs = _iter_kwargs(entities, eids_from_to, kwargs).next()
+ if not main_filter(hook, session, **first_kwargs):
+ pruned.add(hook)
+ return pruned
+
+
+ def filtered_possible_objects(self, pruned, *args, **kwargs):
+ for appobjects in self.itervalues():
+ if pruned:
+ filtered_objects = [obj for obj in appobjects if obj not in pruned]
+ if not filtered_objects:
+ continue
+ else:
+ filtered_objects = appobjects
+ obj = self._select_best(filtered_objects,
+ *args, **kwargs)
+ if obj is None:
+ continue
+ yield obj
class HooksManager(object):
def __init__(self, vreg):
@@ -464,6 +532,15 @@
# stop pylint from complaining about missing attributes in Hooks classes
eidfrom = eidto = entity = rtype = None
+ @classmethod
+ @cached
+ def filterable_selectors(cls):
+ search = cls.__select__.search_selector
+ if search((NotSelector, OrSelector)):
+ return None, None
+ enabled_cat = search(enabled_category)
+ main_filter = search((is_instance, match_rtype))
+ return enabled_cat, main_filter
@classmethod
def check_events(cls):
@@ -653,8 +730,8 @@
operation. These keyword arguments will be accessible as attributes from the
operation instance.
- An operation is triggered on connections pool events related to
- commit / rollback transations. Possible events are:
+ An operation is triggered on connections set events related to commit /
+    rollback transactions. Possible events are:
* `precommit`:
@@ -728,7 +805,7 @@
getattr(self, event)()
def precommit_event(self):
- """the observed connections pool is preparing a commit"""
+ """the observed connections set is preparing a commit"""
def revertprecommit_event(self):
"""an error went when pre-commiting this operation or a later one
@@ -738,14 +815,13 @@
"""
def rollback_event(self):
- """the observed connections pool has been rollbacked
+ """the observed connections set has been rollbacked
- do nothing by default, the operation will just be removed from the pool
- operation list
+ do nothing by default
"""
def postcommit_event(self):
- """the observed connections pool has committed"""
+ """the observed connections set has committed"""
@property
@deprecated('[3.6] use self.session.user')
@@ -1021,7 +1097,7 @@
data_key = 'neweids'
def rollback_event(self):
- """the observed connections pool has been rollbacked,
+ """the observed connections set has been rollbacked,
remove inserted eid from repository type/source cache
"""
try:
@@ -1035,7 +1111,7 @@
"""
data_key = 'pendingeids'
def postcommit_event(self):
- """the observed connections pool has been rollbacked,
+ """the observed connections set has been rollbacked,
remove inserted eid from repository type/source cache
"""
try:
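
The pruning pass above is essentially a per-transaction negative cache: for a
batch of hooks fired on entities of a single etype (or relations of a single
rtype), the hooks whose selector can never match are computed once, stored in
`session.pruned_hooks_cache` under the (event, rtype, etype) key, and then
skipped without re-running their selectors. A simplified sketch of the idea,
with hypothetical hook objects carrying an `etypes` frozenset:

    def pruned_hooks(hooks, etype, cache):
        # compute once per etype which hooks can never be selected
        if etype not in cache:
            cache[etype] = set(h for h in hooks
                               if h.etypes is not None and etype not in h.etypes)
        return cache[etype]

    def candidate_hooks(hooks, etype, cache):
        pruned = pruned_hooks(hooks, etype, cache)
        return [h for h in hooks if h not in pruned]
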
--- a/server/migractions.py Wed Jun 08 15:11:45 2011 +0200
+++ b/server/migractions.py Wed Jun 08 17:08:00 2011 +0200
@@ -201,7 +201,6 @@
versions = repo.get_versions()
for cube, version in versions.iteritems():
version_file.write('%s %s\n' % (cube, version))
-
if not failed:
bkup = tarfile.open(backupfile, 'w|gz')
for filename in os.listdir(tmpdir):
@@ -242,7 +241,7 @@
written_format = format_file.readline().strip()
if written_format in ('portable', 'native'):
format = written_format
- self.config.open_connections_pools = False
+ self.config.init_cnxset_pool = False
repo = self.repo_connect()
for source in repo.sources:
if systemonly and source.uri != 'system':
@@ -255,7 +254,7 @@
raise SystemExit(1)
shutil.rmtree(tmpdir)
# call hooks
- repo.open_connections_pools()
+ repo.init_cnxset_pool()
repo.hm.call_hooks('server_restore', repo=repo, timestamp=backupfile)
print '-> database restored.'
@@ -288,7 +287,7 @@
except (KeyboardInterrupt, EOFError):
print 'aborting...'
sys.exit(0)
- self.session.keep_pool_mode('transaction')
+ self.session.keep_cnxset_mode('transaction')
self.session.data['rebuild-infered'] = False
return self._cnx
@@ -296,10 +295,10 @@
def session(self):
if self.config is not None:
session = self.repo._get_session(self.cnx.sessionid)
- if session.pool is None:
+ if session.cnxset is None:
session.set_read_security(False)
session.set_write_security(False)
- session.set_pool()
+ session.set_cnxset()
return session
# no access to session on remote instance
return None
@@ -308,13 +307,13 @@
if hasattr(self, '_cnx'):
self._cnx.commit()
if self.session:
- self.session.set_pool()
+ self.session.set_cnxset()
def rollback(self):
if hasattr(self, '_cnx'):
self._cnx.rollback()
if self.session:
- self.session.set_pool()
+ self.session.set_cnxset()
def rqlexecall(self, rqliter, ask_confirm=False):
for rql, kwargs in rqliter:
@@ -1360,7 +1359,7 @@
def _cw(self):
session = self.session
if session is not None:
- session.set_pool()
+ session.set_cnxset()
return session
return self.cnx.request()
--- a/server/pool.py Wed Jun 08 15:11:45 2011 +0200
+++ b/server/pool.py Wed Jun 08 17:08:00 2011 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -15,19 +15,18 @@
#
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
-"""CubicWeb server connections pool : the repository has a limited number of
-connections pools, each of them dealing with a set of connections on each source
-used by the repository. A connections pools (`ConnectionsPool`) is an
-abstraction for a group of connection to each source.
+"""CubicWeb server connections set : the repository has a limited number of
+:class:`ConnectionsSet` (defined in configuration, default to 4). Each of them
+hold a connection for each source used by the repository.
"""
__docformat__ = "restructuredtext en"
import sys
-class ConnectionsPool(object):
+class ConnectionsSet(object):
"""handle connections on a set of sources, at some point associated to a
- user session
+ :class:`Session`
"""
def __init__(self, sources):
@@ -81,9 +80,9 @@
self.reconnect(source)
def close(self, i_know_what_i_do=False):
- """close all connections in the pool"""
+ """close all connections in the set"""
if i_know_what_i_do is not True: # unexpected closing safety belt
- raise RuntimeError('pool shouldn\'t be closed')
+ raise RuntimeError('connections set shouldn\'t be closed')
for cu in self._cursors.values():
try:
cu.close()
@@ -97,17 +96,17 @@
# internals ###############################################################
- def pool_set(self):
- """pool is being set"""
+ def cnxset_set(self):
+ """connections set is being set on a session"""
self.check_connections()
- def pool_reset(self):
- """pool is being reseted"""
+ def cnxset_freed(self):
+ """connections set is being freed from a session"""
for source, cnx in self.source_cnxs.values():
- source.pool_reset(cnx)
+ source.cnxset_freed(cnx)
def sources(self):
- """return the source objects handled by this pool"""
+ """return the source objects handled by this connections set"""
# implementation details of flying insert requires the system source
# first
yield self.source_cnxs['system'][0]
--- a/server/querier.py Wed Jun 08 15:11:45 2011 +0200
+++ b/server/querier.py Wed Jun 08 17:08:00 2011 +0200
@@ -169,7 +169,7 @@
# session executing the query
self.session = session
# quick reference to the system source
- self.syssource = session.pool.source('system')
+ self.syssource = session.cnxset.source('system')
# execution steps
self.steps = []
# index of temporary tables created during execution
@@ -734,8 +734,8 @@
# transaction must been rollbacked
#
# notes:
- # * we should not reset the pool here, since we don't want the
- # session to loose its pool during processing
+ # * we should not reset the connections set here, since we don't want the
+    #   session to lose it during processing
# * don't rollback if we're in the commit process, will be handled
# by the session
if session.commit_state is None:
--- a/server/repository.py Wed Jun 08 15:11:45 2011 +0200
+++ b/server/repository.py Wed Jun 08 17:08:00 2011 +0200
@@ -160,13 +160,13 @@
self.sources_by_uri = {'system': self.system_source}
# querier helper, need to be created after sources initialization
self.querier = querier.QuerierHelper(self, self.schema)
- # cache eid -> type / source
+ # cache eid -> (type, source, extid)
self._type_source_cache = {}
# cache (extid, source uri) -> eid
self._extid_cache = {}
- # open some connections pools
- if config.open_connections_pools:
- self.open_connections_pools()
+        # open some connections sets
+ if config.init_cnxset_pool:
+ self.init_cnxset_pool()
@onevent('after-registry-reload', self)
def fix_user_classes(self):
usercls = self.vreg['etypes'].etype_class('CWUser')
@@ -174,10 +174,10 @@
if not isinstance(session.user, InternalManager):
session.user.__class__ = usercls
- def open_connections_pools(self):
+ def init_cnxset_pool(self):
config = self.config
- self._available_pools = Queue.Queue()
- self._available_pools.put_nowait(pool.ConnectionsPool(self.sources))
+ self._cnxsets_pool = Queue.Queue()
+ self._cnxsets_pool.put_nowait(pool.ConnectionsSet(self.sources))
if config.quick_start:
# quick start, usually only to get a minimal repository to get cubes
# information (eg dump/restore/...)
@@ -219,14 +219,14 @@
# configurate tsearch according to postgres version
for source in self.sources:
source.init_creating()
- # close initialization pool and reopen fresh ones for proper
+        # close initialization connections sets and reopen fresh ones for proper
# initialization now that we know cubes
- self._get_pool().close(True)
- # list of available pools (we can't iterate on Queue instance)
- self.pools = []
+ self._get_cnxset().close(True)
+        # list of available cnxsets (we can't iterate on a Queue instance)
+ self.cnxsets = []
for i in xrange(config['connections-pool-size']):
- self.pools.append(pool.ConnectionsPool(self.sources))
- self._available_pools.put_nowait(self.pools[-1])
+ self.cnxsets.append(pool.ConnectionsSet(self.sources))
+ self._cnxsets_pool.put_nowait(self.cnxsets[-1])
if config.quick_start:
config.init_cubes(self.get_cubes())
self.hm = hook.HooksManager(self.vreg)
@@ -249,7 +249,7 @@
self.sources_by_eid[sourceent.eid] = self.system_source
self.system_source.init(True, sourceent)
continue
- self.add_source(sourceent, add_to_pools=False)
+ self.add_source(sourceent, add_to_cnxsets=False)
finally:
session.close()
@@ -258,7 +258,7 @@
'can_cross_relation', 'rel_type_sources'):
clear_cache(self, cache)
- def add_source(self, sourceent, add_to_pools=True):
+ def add_source(self, sourceent, add_to_cnxsets=True):
source = self.get_source(sourceent.type, sourceent.name,
sourceent.host_config, sourceent.eid)
self.sources_by_eid[sourceent.eid] = source
@@ -266,15 +266,15 @@
if self.config.source_enabled(source):
# call source's init method to complete their initialisation if
# needed (for instance looking for persistent configuration using an
- # internal session, which is not possible until pools have been
+ # internal session, which is not possible until connections sets have been
# initialized)
source.init(True, sourceent)
if not source.copy_based_source:
self.sources.append(source)
self.querier.set_planner()
- if add_to_pools:
- for pool in self.pools:
- pool.add_source(source)
+ if add_to_cnxsets:
+ for cnxset in self.cnxsets:
+ cnxset.add_source(source)
else:
source.init(False, sourceent)
self._clear_planning_caches()
@@ -285,8 +285,8 @@
if self.config.source_enabled(source) and not source.copy_based_source:
self.sources.remove(source)
self.querier.set_planner()
- for pool in self.pools:
- pool.remove_source(source)
+ for cnxset in self.cnxsets:
+ cnxset.remove_source(source)
self._clear_planning_caches()
def get_source(self, type, uri, source_config, eid=None):
@@ -373,25 +373,25 @@
t.start()
#@locked
- def _get_pool(self):
+ def _get_cnxset(self):
try:
- return self._available_pools.get(True, timeout=5)
+ return self._cnxsets_pool.get(True, timeout=5)
except Queue.Empty:
- raise Exception('no pool available after 5 secs, probably either a '
+ raise Exception('no connections set available after 5 secs, probably either a '
'bug in code (too many uncommited/rollbacked '
'connections) or too much load on the server (in '
'which case you can try to set a bigger '
- 'connections pools size)')
+ 'connections pool size)')
- def _free_pool(self, pool):
- self._available_pools.put_nowait(pool)
+ def _free_cnxset(self, cnxset):
+ self._cnxsets_pool.put_nowait(cnxset)
def pinfo(self):
- # XXX: session.pool is accessed from a local storage, would be interesting
- # to see if there is a pool set in any thread specific data)
- return '%s: %s (%s)' % (self._available_pools.qsize(),
+ # XXX: session.cnxset is accessed from a local storage, would be interesting
+        # to see if there is a cnxset set in any thread-specific data
+ return '%s: %s (%s)' % (self._cnxsets_pool.qsize(),
','.join(session.user.login for session in self._sessions.values()
- if session.pool),
+ if session.cnxset),
threading.currentThread())
def shutdown(self):
"""called on server stop event to properly close opened sessions and
@@ -414,12 +414,12 @@
or self.config.quick_start):
self.hm.call_hooks('server_shutdown', repo=self)
self.close_sessions()
- while not self._available_pools.empty():
- pool = self._available_pools.get_nowait()
+ while not self._cnxsets_pool.empty():
+ cnxset = self._cnxsets_pool.get_nowait()
try:
- pool.close(True)
+ cnxset.close(True)
except:
- self.exception('error while closing %s' % pool)
+ self.exception('error while closing %s' % cnxset)
continue
if self.pyro_registered:
if self._use_pyrons():
@@ -501,7 +501,7 @@
results['nb_open_sessions'] = len(self._sessions)
results['nb_active_threads'] = threading.activeCount()
results['looping_tasks'] = ', '.join(str(t) for t in self._looping_tasks)
- results['available_pools'] = self._available_pools.qsize()
+ results['available_cnxsets'] = self._cnxsets_pool.qsize()
results['threads'] = ', '.join(sorted(str(t) for t in threading.enumerate()))
return results
@@ -543,9 +543,9 @@
_, sourceuri, extid = self.type_and_source_from_eid(foreid)
if sourceuri == 'system':
return self.config[option]
- pool = self._get_pool()
+ cnxset = self._get_cnxset()
try:
- cnx = pool.connection(sourceuri)
+ cnx = cnxset.connection(sourceuri)
# needed to check connection is valid and usable by the current
# thread
newcnx = self.sources_by_uri[sourceuri].check_connection(cnx)
@@ -553,7 +553,7 @@
cnx = newcnx
return cnx.get_option_value(option, extid)
finally:
- self._free_pool(pool)
+ self._free_cnxset(cnxset)
@cached
def get_versions(self, checkversions=False):
@@ -726,7 +726,7 @@
* build_descr is a flag indicating if the description should be
built on select queries
"""
- session = self._get_session(sessionid, setpool=True, txid=txid)
+ session = self._get_session(sessionid, setcnxset=True, txid=txid)
try:
try:
rset = self.querier.execute(session, rqlstring, args,
@@ -752,21 +752,21 @@
self.exception('unexpected error while executing %s with %s', rqlstring, args)
raise
finally:
- session.reset_pool()
+ session.free_cnxset()
def describe(self, sessionid, eid, txid=None):
"""return a tuple (type, source, extid) for the entity with id <eid>"""
- session = self._get_session(sessionid, setpool=True, txid=txid)
+ session = self._get_session(sessionid, setcnxset=True, txid=txid)
try:
return self.type_and_source_from_eid(eid, session)
finally:
- session.reset_pool()
+ session.free_cnxset()
def check_session(self, sessionid):
"""raise `BadConnectionId` if the connection is no more valid, else
return its latest activity timestamp.
"""
- return self._get_session(sessionid, setpool=False).timestamp
+ return self._get_session(sessionid, setcnxset=False).timestamp
def get_shared_data(self, sessionid, key, default=None, pop=False, txdata=False):
"""return value associated to key in the session's data dictionary or
@@ -777,7 +777,7 @@
If key isn't defined in the dictionnary, value specified by the
`default` argument will be returned.
"""
- session = self._get_session(sessionid, setpool=False)
+ session = self._get_session(sessionid, setcnxset=False)
return session.get_shared_data(key, default, pop, txdata)
def set_shared_data(self, sessionid, key, value, txdata=False):
@@ -787,7 +787,7 @@
transaction's data which are cleared on commit/rollback of the current
transaction.
"""
- session = self._get_session(sessionid, setpool=False)
+ session = self._get_session(sessionid, setcnxset=False)
session.set_shared_data(key, value, txdata)
def commit(self, sessionid, txid=None):
@@ -816,10 +816,10 @@
def close(self, sessionid, txid=None, checkshuttingdown=True):
"""close the session with the given id"""
- session = self._get_session(sessionid, setpool=True, txid=txid,
+ session = self._get_session(sessionid, setcnxset=True, txid=txid,
checkshuttingdown=checkshuttingdown)
# operation uncommited before close are rollbacked before hook is called
- session.rollback(reset_pool=False)
+ session.rollback(free_cnxset=False)
self.hm.call_hooks('session_close', session)
# commit session at this point in case write operation has been done
# during `session_close` hooks
@@ -834,7 +834,7 @@
* update user information on each user's request (i.e. groups and
custom properties)
"""
- session = self._get_session(sessionid, setpool=False)
+ session = self._get_session(sessionid, setcnxset=False)
if props is not None:
self.set_session_props(sessionid, props)
user = session.user
@@ -846,43 +846,43 @@
* update user information on each user's request (i.e. groups and
custom properties)
"""
- session = self._get_session(sessionid, setpool=False)
+ session = self._get_session(sessionid, setcnxset=False)
for prop, value in props.items():
session.change_property(prop, value)
def undoable_transactions(self, sessionid, ueid=None, txid=None,
**actionfilters):
"""See :class:`cubicweb.dbapi.Connection.undoable_transactions`"""
- session = self._get_session(sessionid, setpool=True, txid=txid)
+ session = self._get_session(sessionid, setcnxset=True, txid=txid)
try:
return self.system_source.undoable_transactions(session, ueid,
**actionfilters)
finally:
- session.reset_pool()
+ session.free_cnxset()
def transaction_info(self, sessionid, txuuid, txid=None):
"""See :class:`cubicweb.dbapi.Connection.transaction_info`"""
- session = self._get_session(sessionid, setpool=True, txid=txid)
+ session = self._get_session(sessionid, setcnxset=True, txid=txid)
try:
return self.system_source.tx_info(session, txuuid)
finally:
- session.reset_pool()
+ session.free_cnxset()
def transaction_actions(self, sessionid, txuuid, public=True, txid=None):
"""See :class:`cubicweb.dbapi.Connection.transaction_actions`"""
- session = self._get_session(sessionid, setpool=True, txid=txid)
+ session = self._get_session(sessionid, setcnxset=True, txid=txid)
try:
return self.system_source.tx_actions(session, txuuid, public)
finally:
- session.reset_pool()
+ session.free_cnxset()
def undo_transaction(self, sessionid, txuuid, txid=None):
"""See :class:`cubicweb.dbapi.Connection.undo_transaction`"""
- session = self._get_session(sessionid, setpool=True, txid=txid)
+ session = self._get_session(sessionid, setcnxset=True, txid=txid)
try:
return self.system_source.undo_transaction(session, txuuid)
finally:
- session.reset_pool()
+ session.free_cnxset()
# public (inter-repository) interface #####################################
@@ -934,14 +934,14 @@
"""return a dbapi like connection/cursor using internal user which
have every rights on the repository. You'll *have to* commit/rollback
or close (rollback implicitly) the session once the job's done, else
- you'll leak connections pool up to the time where no more pool is
+        you'll leak connections sets until no more are
available, causing irremediable freeze...
"""
session = InternalSession(self, cnxprops)
- session.set_pool()
+ session.set_cnxset()
return session
- def _get_session(self, sessionid, setpool=False, txid=None,
+ def _get_session(self, sessionid, setcnxset=False, txid=None,
checkshuttingdown=True):
"""return the user associated to the given session identifier"""
if checkshuttingdown and self.shutting_down:
@@ -950,9 +950,9 @@
session = self._sessions[sessionid]
except KeyError:
raise BadConnectionId('No such session %s' % sessionid)
- if setpool:
- session.set_tx_data(txid) # must be done before set_pool
- session.set_pool()
+ if setcnxset:
+ session.set_tx_data(txid) # must be done before set_cnxset
+ session.set_cnxset()
return session
# data sources handling ###################################################
@@ -970,15 +970,15 @@
except KeyError:
if session is None:
session = self.internal_session()
- reset_pool = True
+ free_cnxset = True
else:
- reset_pool = False
+ free_cnxset = False
try:
etype, uri, extid = self.system_source.eid_type_source(session,
eid)
finally:
- if reset_pool:
- session.reset_pool()
+ if free_cnxset:
+ session.free_cnxset()
self._type_source_cache[eid] = (etype, uri, extid)
if uri != 'system':
self._extid_cache[(extid, uri)] = eid
@@ -1032,23 +1032,44 @@
def extid2eid(self, source, extid, etype, session=None, insert=True,
sourceparams=None):
- """get eid from a local id. An eid is attributed if no record is found"""
+ """Return eid from a local id. If the eid is a negative integer, that
+ means the entity is known but has been copied back to the system source
+ hence should be ignored.
+
+        If no record is found, i.e. the entity is not known yet:
+
+ 1. an eid is attributed
+
+ 2. the source's :meth:`before_entity_insertion` method is called to
+ build the entity instance
+
+        3. unless source's :attr:`should_call_hooks` tells otherwise,
+          'before_add_entity' hooks are called
+
+ 4. record is added into the system source
+
+ 5. the source's :meth:`after_entity_insertion` method is called to
+ complete building of the entity instance
+
+        6. unless source's :attr:`should_call_hooks` tells otherwise,
+          'after_add_entity' hooks are called
+ """
uri = 'system' if source.copy_based_source else source.uri
cachekey = (extid, uri)
try:
return self._extid_cache[cachekey]
except KeyError:
pass
- reset_pool = False
+ free_cnxset = False
if session is None:
session = self.internal_session()
- reset_pool = True
+ free_cnxset = True
eid = self.system_source.extid2eid(session, uri, extid)
if eid is not None:
self._extid_cache[cachekey] = eid
self._type_source_cache[eid] = (etype, uri, extid)
- if reset_pool:
- session.reset_pool()
+ if free_cnxset:
+ session.free_cnxset()
return eid
if not insert:
return
@@ -1060,7 +1081,7 @@
# processing a commit, we have to use another one
if not session.is_internal_session:
session = self.internal_session()
- reset_pool = True
+ free_cnxset = True
try:
eid = self.system_source.create_eid(session)
self._extid_cache[cachekey] = eid
@@ -1074,10 +1095,10 @@
source.after_entity_insertion(session, extid, entity, sourceparams)
if source.should_call_hooks:
self.hm.call_hooks('after_add_entity', session, entity=entity)
- session.commit(reset_pool)
+ session.commit(free_cnxset)
return eid
except:
- session.rollback(reset_pool)
+ session.rollback(free_cnxset)
raise
def add_info(self, session, entity, source, extid=None, complete=True):
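
Mechanically, the renamed pool keeps the same `Queue.Queue` based design: a
blocking `get` with a timeout on acquisition, a `put_nowait` on release. A
standalone sketch of that pattern (names hypothetical):

    import Queue

    class CnxsetPool(object):
        def __init__(self, factory, size=4):
            self._queue = Queue.Queue()
            for _ in xrange(size):
                self._queue.put_nowait(factory())

        def acquire(self):
            try:
                # block at most 5 seconds, as Repository._get_cnxset does
                return self._queue.get(True, timeout=5)
            except Queue.Empty:
                raise Exception('no connections set available after 5 secs')

        def release(self, cnxset):
            self._queue.put_nowait(cnxset)
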
--- a/server/schemaserial.py Wed Jun 08 15:11:45 2011 +0200
+++ b/server/schemaserial.py Wed Jun 08 17:08:00 2011 +0200
@@ -88,7 +88,7 @@
repo = session.repo
dbhelper = repo.system_source.dbhelper
# XXX bw compat (3.6 migration)
- sqlcu = session.pool['system']
+ sqlcu = session.cnxset['system']
sqlcu.execute("SELECT * FROM cw_CWRType WHERE cw_name='symetric'")
if sqlcu.fetchall():
sql = dbhelper.sql_rename_col('cw_CWRType', 'cw_symetric', 'cw_symmetric',
--- a/server/serverconfig.py Wed Jun 08 15:11:45 2011 +0200
+++ b/server/serverconfig.py Wed Jun 08 17:08:00 2011 +0200
@@ -130,7 +130,7 @@
('connections-pool-size',
{'type' : 'int',
'default': 4,
- 'help': 'size of the connections pools. Each source supporting multiple \
+ 'help': 'size of the connections pool. Each source supporting multiple \
connections will have this number of opened connections.',
'group': 'main', 'level': 3,
}),
@@ -209,9 +209,9 @@
}),
) + CubicWebConfiguration.options)
- # should we open connections pools (eg connect to sources). This is usually
- # necessary...
- open_connections_pools = True
+ # should we init the connections pool (eg connect to sources). This is
+ # usually necessary...
+ init_cnxset_pool = True
# read the schema from the database
read_instance_schema = True
@@ -255,7 +255,7 @@
# configuration file (#16102)
@cached
def read_sources_file(self):
- return read_config(self.sources_file())
+ return read_config(self.sources_file(), raise_if_unreadable=True)
def sources(self):
"""return a dictionnaries containing sources definitions indexed by
--- a/server/serverctl.py Wed Jun 08 15:11:45 2011 +0200
+++ b/server/serverctl.py Wed Jun 08 17:08:00 2011 +0200
@@ -24,6 +24,7 @@
# completion). So import locally in command helpers.
import sys
import os
+import logging
from logilab.common import nullobject
from logilab.common.configuration import Configuration
@@ -637,8 +638,7 @@
appid = args[0]
debug = self['debug']
if sys.platform == 'win32' and not debug:
- from logging import getLogger
- logger = getLogger('cubicweb.ctl')
+ logger = logging.getLogger('cubicweb.ctl')
logger.info('Forcing debug mode on win32 platform')
debug = True
config = ServerConfiguration.config_for(appid, debugmode=debug)
@@ -970,7 +970,7 @@
appid = args[0]
config = ServerConfiguration.config_for(appid)
repo, cnx = repo_cnx(config)
- session = repo._get_session(cnx.sessionid, setpool=True)
+ session = repo._get_session(cnx.sessionid, setcnxset=True)
reindex_entities(repo.schema, session)
cnx.commit()
@@ -995,11 +995,43 @@
mih.cmd_synchronize_schema()
+class SynchronizeSourceCommand(Command):
+ """Force a source synchronization.
+
+ <instance>
+ the identifier of the instance
+ <source>
+ the name of the source to synchronize.
+ """
+ name = 'source-sync'
+ arguments = '<instance> <source>'
+ min_args = max_args = 2
+
+ def run(self, args):
+ config = ServerConfiguration.config_for(args[0])
+ config.global_set_option('log-file', None)
+ config.log_format = '%(levelname)s %(name)s: %(message)s'
+ logger = logging.getLogger('cubicweb.sources')
+ logger.setLevel(logging.INFO)
+ # only retrieve cnx to trigger authentication, close it right away
+ repo, cnx = repo_cnx(config)
+ cnx.close()
+ try:
+ source = repo.sources_by_uri[args[1]]
+ except KeyError:
+ raise ExecutionError('no source named %r' % args[1])
+ session = repo.internal_session()
+ stats = source.pull_data(session, force=True, raise_on_error=True)
+ for key, val in stats.iteritems():
+ if val:
+ print key, ':', val
+
+
for cmdclass in (CreateInstanceDBCommand, InitInstanceCommand,
GrantUserOnInstanceCommand, ResetAdminPasswordCommand,
StartRepositoryCommand,
DBDumpCommand, DBRestoreCommand, DBCopyCommand,
AddSourceCommand, CheckRepositoryCommand, RebuildFTICommand,
- SynchronizeInstanceSchemaCommand,
+ SynchronizeInstanceSchemaCommand, SynchronizeSourceCommand
):
CWCTL.register(cmdclass)
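
With the command registered above, a synchronization can presumably be forced
from the shell like any other cubicweb-ctl command, e.g.:

    cubicweb-ctl source-sync myinstance mysource

where `myinstance` and `mysource` stand for the instance identifier and the
name of a CWSource defined in it.
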
--- a/server/session.py Wed Jun 08 15:11:45 2011 +0200
+++ b/server/session.py Wed Jun 08 17:08:00 2011 +0200
@@ -117,21 +117,13 @@
self.categories = categories
def __enter__(self):
- self.oldmode = self.session.set_hooks_mode(self.mode)
- if self.mode is self.session.HOOKS_DENY_ALL:
- self.changes = self.session.enable_hook_categories(*self.categories)
- else:
- self.changes = self.session.disable_hook_categories(*self.categories)
+ self.oldmode, self.changes = self.session.init_hooks_mode_categories(
+ self.mode, self.categories)
def __exit__(self, exctype, exc, traceback):
- if self.changes:
- if self.mode is self.session.HOOKS_DENY_ALL:
- self.session.disable_hook_categories(*self.changes)
- else:
- self.session.enable_hook_categories(*self.changes)
- self.session.set_hooks_mode(self.oldmode)
+ self.session.reset_hooks_mode_categories(self.oldmode, self.mode, self.changes)
-INDENT = ''
+
class security_enabled(object):
"""context manager to control security w/ session.execute, since by
default security is disabled on queries executed on the repository
@@ -143,33 +135,90 @@
self.write = write
def __enter__(self):
-# global INDENT
- if self.read is not None:
- self.oldread = self.session.set_read_security(self.read)
-# print INDENT + 'read', self.read, self.oldread
- if self.write is not None:
- self.oldwrite = self.session.set_write_security(self.write)
-# print INDENT + 'write', self.write, self.oldwrite
-# INDENT += ' '
+ self.oldread, self.oldwrite = self.session.init_security(
+ self.read, self.write)
def __exit__(self, exctype, exc, traceback):
-# global INDENT
-# INDENT = INDENT[:-2]
- if self.read is not None:
- self.session.set_read_security(self.oldread)
-# print INDENT + 'reset read to', self.oldread
- if self.write is not None:
- self.session.set_write_security(self.oldwrite)
-# print INDENT + 'reset write to', self.oldwrite
+ self.session.reset_security(self.oldread, self.oldwrite)
class TransactionData(object):
def __init__(self, txid):
self.transactionid = txid
+ self.ctx_count = 0
+
class Session(RequestSessionBase):
- """tie session id, user, connections pool and other session data all
- together
+ """Repository usersession, tie a session id, user, connections set and
+ other session data all together.
+
+ About session storage / transactions
+ ------------------------------------
+
+ Here is a description of internal session attributes. Besides :attr:`data`
+ and :attr:`transaction_data`, you should not have to use attributes
+ described here but higher level APIs.
+
+ :attr:`data` is a dictionary containing shared data, used to communicate
+ extra information between the client and the repository
+
+    :attr:`_tx_data` is a dictionary of :class:`TransactionData` instances, one
+ for each running transaction. The key is the transaction id. By default
+ the transaction id is the thread name but it can be otherwise (per dbapi
+ cursor for instance, or per thread name *from another process*).
+
+ :attr:`__threaddata` is a thread local storage whose `txdata` attribute
+ refers to the proper instance of :class:`TransactionData` according to the
+ transaction.
+
+ :attr:`_threads_in_transaction` is a set of (thread, connections set)
+ referencing threads that currently hold a connections set for the session.
+
+    You should not have to use either :attr:`_tx_data` or :attr:`__threaddata`;
+ simply access transaction data transparently through the :attr:`_threaddata`
+ property. Also, you usually don't have to access it directly since current
+ transaction's data may be accessed/modified through properties / methods:
+
+ :attr:`transaction_data`, similarly to :attr:`data`, is a dictionary
+ containing some shared data that should be cleared at the end of the
+ transaction. Hooks and operations may put arbitrary data in there, and
+ this may also be used as a communication channel between the client and
+ the repository.
+
+ :attr:`cnxset`, the connections set to use to execute queries on sources.
+    During a transaction, the connections set may be freed so that it may be
+ used by another session as long as no writing is done. This means we can
+ have multiple sessions with a reasonably low connections set pool size.
+
+ :attr:`mode`, string telling the connections set handling mode, may be one
+ of 'read' (connections set may be freed), 'write' (some write was done in
+ the connections set, it can't be freed before end of the transaction),
+ 'transaction' (we want to keep the connections set during all the
+ transaction, with or without writing)
+
+ :attr:`pending_operations`, ordered list of operations to be processed on
+ commit/rollback
+
+ :attr:`commit_state`, describing the transaction commit state, may be one
+ of None (not yet committing), 'precommit' (calling precommit event on
+ operations), 'postcommit' (calling postcommit event on operations),
+ 'uncommitable' (some :exc:`ValidationError` or :exc:`Unauthorized` error
+    has been raised during the transaction and so it must be rolled back).
+
+ :attr:`read_security` and :attr:`write_security`, boolean flags telling if
+ read/write security is currently activated.
+
+ :attr:`hooks_mode`, may be either `HOOKS_ALLOW_ALL` or `HOOKS_DENY_ALL`.
+
+ :attr:`enabled_hook_categories`, when :attr:`hooks_mode` is
+ `HOOKS_DENY_ALL`, this set contains hooks categories that are enabled.
+
+ :attr:`disabled_hook_categories`, when :attr:`hooks_mode` is
+ `HOOKS_ALLOW_ALL`, this set contains hooks categories that are disabled.
+
+
+ :attr:`running_dbapi_query`, boolean flag telling if the executing query
+ is coming from a dbapi connection or is a query from within the repository
"""
is_internal_session = False
@@ -238,7 +287,10 @@
"""return a fake request/session using specified user"""
session = Session(user, self.repo)
threaddata = session._threaddata
- threaddata.pool = self.pool
+ threaddata.cnxset = self.cnxset
+        # we attributed a connections set, need to update ctx_count else it
+        # will be freed while still in use
+ threaddata.ctx_count = 1
# share pending_operations, else operation added in the hi-jacked
# session such as SendMailOp won't ever be processed
threaddata.pending_operations = self.pending_operations
@@ -263,7 +315,7 @@
def add_relations(self, relations):
'''set many relation using a shortcut similar to the one in add_relation
-
+
relations is a list of 2-uples, the first element of each
2-uple is the rtype, and the second is a list of (fromeid,
toeid) tuples
@@ -380,14 +432,14 @@
"""return a sql cursor on the system database"""
if sql.split(None, 1)[0].upper() != 'SELECT':
self.mode = 'write'
- source = self.pool.source('system')
+ source = self.cnxset.source('system')
try:
return source.doexec(self, sql, args, rollback=rollback_on_failure)
except (source.OperationalError, source.InterfaceError):
if not rollback_on_failure:
raise
source.warning("trying to reconnect")
- self.pool.reconnect(source)
+ self.cnxset.reconnect(source)
return source.doexec(self, sql, args, rollback=rollback_on_failure)
def set_language(self, language):
@@ -438,6 +490,29 @@
def security_enabled(self, read=False, write=False):
return security_enabled(self, read=read, write=write)
+ def init_security(self, read, write):
+ if read is None:
+ oldread = None
+ else:
+ oldread = self.set_read_security(read)
+ if write is None:
+ oldwrite = None
+ else:
+ oldwrite = self.set_write_security(write)
+ self._threaddata.ctx_count += 1
+ return oldread, oldwrite
+
+ def reset_security(self, read, write):
+ txstore = self._threaddata
+ txstore.ctx_count -= 1
+ if txstore.ctx_count == 0:
+ self._clear_thread_storage(txstore)
+ else:
+ if read is not None:
+ self.set_read_security(read)
+ if write is not None:
+ self.set_write_security(write)
+
@property
def read_security(self):
"""return a boolean telling if read security is activated or not"""
@@ -538,6 +613,28 @@
self._threaddata.hooks_mode = mode
return oldmode
+ def init_hooks_mode_categories(self, mode, categories):
+ oldmode = self.set_hooks_mode(mode)
+ if mode is self.HOOKS_DENY_ALL:
+ changes = self.enable_hook_categories(*categories)
+ else:
+ changes = self.disable_hook_categories(*categories)
+ self._threaddata.ctx_count += 1
+ return oldmode, changes
+
+ def reset_hooks_mode_categories(self, oldmode, mode, categories):
+ txstore = self._threaddata
+ txstore.ctx_count -= 1
+ if txstore.ctx_count == 0:
+ self._clear_thread_storage(txstore)
+        else:
+            if categories:
+                if mode is self.HOOKS_DENY_ALL:
+                    self.disable_hook_categories(*categories)
+                else:
+                    self.enable_hook_categories(*categories)
+            self.set_hooks_mode(oldmode)
+
@property
def disabled_hook_categories(self):
try:
@@ -561,17 +658,18 @@
- on HOOKS_ALLOW_ALL mode, ensure those categories are disabled
"""
changes = set()
+ self.pruned_hooks_cache.clear()
if self.hooks_mode is self.HOOKS_DENY_ALL:
- enablecats = self.enabled_hook_categories
+ enabledcats = self.enabled_hook_categories
for category in categories:
- if category in enablecats:
- enablecats.remove(category)
+ if category in enabledcats:
+ enabledcats.remove(category)
changes.add(category)
else:
- disablecats = self.disabled_hook_categories
+ disabledcats = self.disabled_hook_categories
for category in categories:
- if category not in disablecats:
- disablecats.add(category)
+ if category not in disabledcats:
+ disabledcats.add(category)
changes.add(category)
return tuple(changes)
@@ -582,17 +680,18 @@
- on HOOKS_ALLOW_ALL mode, ensure those categories are not disabled
"""
changes = set()
+ self.pruned_hooks_cache.clear()
if self.hooks_mode is self.HOOKS_DENY_ALL:
- enablecats = self.enabled_hook_categories
+ enabledcats = self.enabled_hook_categories
for category in categories:
- if category not in enablecats:
- enablecats.add(category)
+ if category not in enabledcats:
+ enabledcats.add(category)
changes.add(category)
else:
- disablecats = self.disabled_hook_categories
+ disabledcats = self.disabled_hook_categories
for category in categories:
- if category in self.disabled_hook_categories:
- disablecats.remove(category)
+ if category in disabledcats:
+ disabledcats.remove(category)
changes.add(category)
return tuple(changes)
@@ -612,19 +711,19 @@
# connection management ###################################################
- def keep_pool_mode(self, mode):
- """set pool_mode, e.g. how the session will keep its pool:
+ def keep_cnxset_mode(self, mode):
+ """set `mode`, e.g. how the session will keep its connections set:
- * if mode == 'write', the pool is freed after each ready query, but kept
- until the transaction's end (eg commit or rollback) when a write query
- is detected (eg INSERT/SET/DELETE queries)
+        * if mode == 'write', the connections set is freed after each read
+ query, but kept until the transaction's end (eg commit or rollback)
+ when a write query is detected (eg INSERT/SET/DELETE queries)
- * if mode == 'transaction', the pool is only freed after the
+ * if mode == 'transaction', the connections set is only freed after the
transaction's end
- notice that a repository has a limited set of pools, and a session has to
- wait for a free pool to run any rql query (unless it already has a pool
- set).
+ notice that a repository has a limited set of connections sets, and a
+ session has to wait for a free connections set to run any rql query
+ (unless it already has one set).
"""
assert mode in ('transaction', 'write')
if mode == 'transaction':
@@ -647,56 +746,58 @@
commit_state = property(get_commit_state, set_commit_state)
@property
- def pool(self):
- """connections pool, set according to transaction mode for each query"""
+ def cnxset(self):
+ """connections set, set according to transaction mode for each query"""
if self._closed:
- self.reset_pool(True)
- raise Exception('try to access pool on a closed session')
- return getattr(self._threaddata, 'pool', None)
+ self.free_cnxset(True)
+ raise Exception('try to access connections set on a closed session')
+ return getattr(self._threaddata, 'cnxset', None)
- def set_pool(self):
- """the session need a pool to execute some queries"""
+ def set_cnxset(self):
+ """the session need a connections set to execute some queries"""
with self._closed_lock:
if self._closed:
- self.reset_pool(True)
- raise Exception('try to set pool on a closed session')
- if self.pool is None:
- # get pool first to avoid race-condition
- self._threaddata.pool = pool = self.repo._get_pool()
+ self.free_cnxset(True)
+ raise Exception('try to set connections set on a closed session')
+ if self.cnxset is None:
+ # get connections set first to avoid race-condition
+ self._threaddata.cnxset = cnxset = self.repo._get_cnxset()
+ self._threaddata.ctx_count += 1
try:
- pool.pool_set()
+ cnxset.cnxset_set()
except:
- self._threaddata.pool = None
- self.repo._free_pool(pool)
+ self._threaddata.cnxset = None
+ self.repo._free_cnxset(cnxset)
raise
self._threads_in_transaction.add(
- (threading.currentThread(), pool) )
- return self._threaddata.pool
+ (threading.currentThread(), cnxset) )
+ return self._threaddata.cnxset
- def _free_thread_pool(self, thread, pool, force_close=False):
+ def _free_thread_cnxset(self, thread, cnxset, force_close=False):
try:
- self._threads_in_transaction.remove( (thread, pool) )
+ self._threads_in_transaction.remove( (thread, cnxset) )
except KeyError:
- # race condition on pool freeing (freed by commit or rollback vs
+ # race condition on cnxset freeing (freed by commit or rollback vs
# close)
pass
else:
if force_close:
- pool.reconnect()
+ cnxset.reconnect()
else:
- pool.pool_reset()
- # free pool once everything is done to avoid race-condition
- self.repo._free_pool(pool)
+ cnxset.cnxset_freed()
+ # free cnxset once everything is done to avoid race-condition
+ self.repo._free_cnxset(cnxset)
- def reset_pool(self, ignoremode=False):
- """the session is no longer using its pool, at least for some time"""
- # pool may be none if no operation has been done since last commit
+ def free_cnxset(self, ignoremode=False):
+ """the session is no longer using its connections set, at least for some time"""
+ # cnxset may be none if no operation has been done since last commit
# or rollback
- pool = getattr(self._threaddata, 'pool', None)
- if pool is not None and (ignoremode or self.mode == 'read'):
+ cnxset = getattr(self._threaddata, 'cnxset', None)
+ if cnxset is not None and (ignoremode or self.mode == 'read'):
# even in read mode, we must release the current transaction
- self._free_thread_pool(threading.currentThread(), pool)
- del self._threaddata.pool
+ self._free_thread_cnxset(threading.currentThread(), cnxset)
+ del self._threaddata.cnxset
+ self._threaddata.ctx_count -= 1
def _touch(self):
"""update latest session usage timestamp and reset mode to read"""
@@ -785,9 +886,9 @@
rset.req = self
return rset
- def _clear_thread_data(self, reset_pool=True):
- """remove everything from the thread local storage, except pool
- which is explicitly removed by reset_pool, and mode which is set anyway
+ def _clear_thread_data(self, free_cnxset=True):
+ """remove everything from the thread local storage, except connections set
+ which is explicitly removed by free_cnxset, and mode which is set anyway
by _touch
"""
try:
@@ -795,23 +896,38 @@
except AttributeError:
pass
else:
- if reset_pool:
- self._tx_data.pop(txstore.transactionid, None)
- try:
- del self.__threaddata.txdata
- except AttributeError:
- pass
+ if free_cnxset:
+ self.free_cnxset()
+ if txstore.ctx_count == 0:
+ self._clear_thread_storage(txstore)
+ else:
+ self._clear_tx_storage(txstore)
else:
- for name in ('commit_state', 'transaction_data',
- 'pending_operations', '_rewriter'):
- try:
- delattr(txstore, name)
- except AttributeError:
- continue
+ self._clear_tx_storage(txstore)
+
+ def _clear_thread_storage(self, txstore):
+ self._tx_data.pop(txstore.transactionid, None)
+ try:
+ del self.__threaddata.txdata
+ except AttributeError:
+ pass
- def commit(self, reset_pool=True):
+ def _clear_tx_storage(self, txstore):
+ for name in ('commit_state', 'transaction_data',
+ 'pending_operations', '_rewriter',
+ 'pruned_hooks_cache'):
+ try:
+ delattr(txstore, name)
+ except AttributeError:
+ continue
+
+ def commit(self, free_cnxset=True, reset_pool=None):
"""commit the current session's transaction"""
- if self.pool is None:
+ if reset_pool is not None:
+ warn('[3.13] use free_cnxset argument instead for reset_pool',
+ DeprecationWarning, stacklevel=2)
+ free_cnxset = reset_pool
+ if self.cnxset is None:
assert not self.pending_operations
self._clear_thread_data()
self._touch()
@@ -860,9 +976,9 @@
# XXX use slice notation since self.pending_operations is a
# read-only property.
self.pending_operations[:] = processed + self.pending_operations
- self.rollback(reset_pool)
+ self.rollback(free_cnxset)
raise
- self.pool.commit()
+ self.cnxset.commit()
self.commit_state = 'postcommit'
while self.pending_operations:
operation = self.pending_operations.pop(0)
@@ -876,15 +992,19 @@
return self.transaction_uuid(set=False)
finally:
self._touch()
- if reset_pool:
- self.reset_pool(ignoremode=True)
- self._clear_thread_data(reset_pool)
+ if free_cnxset:
+ self.free_cnxset(ignoremode=True)
+ self._clear_thread_data(free_cnxset)
- def rollback(self, reset_pool=True):
+ def rollback(self, free_cnxset=True, reset_pool=None):
"""rollback the current session's transaction"""
- # don't use self.pool, rollback may be called with _closed == True
- pool = getattr(self._threaddata, 'pool', None)
- if pool is None:
+ if reset_pool is not None:
+ warn('[3.13] use free_cnxset argument instead for reset_pool',
+ DeprecationWarning, stacklevel=2)
+ free_cnxset = reset_pool
+ # don't use self.cnxset, rollback may be called with _closed == True
+ cnxset = getattr(self._threaddata, 'cnxset', None)
+ if cnxset is None:
self._clear_thread_data()
self._touch()
self.debug('rollback session %s done (no db activity)', self.id)
@@ -899,20 +1019,20 @@
except:
self.critical('rollback error', exc_info=sys.exc_info())
continue
- pool.rollback()
+ cnxset.rollback()
self.debug('rollback for session %s done', self.id)
finally:
self._touch()
- if reset_pool:
- self.reset_pool(ignoremode=True)
- self._clear_thread_data(reset_pool)
+ if free_cnxset:
+ self.free_cnxset(ignoremode=True)
+ self._clear_thread_data(free_cnxset)
def close(self):
- """do not close pool on session close, since they are shared now"""
+ """do not close connections set on session close, since they are shared now"""
with self._closed_lock:
self._closed = True
# copy since _threads_in_transaction maybe modified while waiting
- for thread, pool in self._threads_in_transaction.copy():
+ for thread, cnxset in self._threads_in_transaction.copy():
if thread is threading.currentThread():
continue
self.info('waiting for thread %s', thread)
@@ -922,12 +1042,12 @@
for i in xrange(10):
thread.join(1)
if not (thread.isAlive() and
- (thread, pool) in self._threads_in_transaction):
+ (thread, cnxset) in self._threads_in_transaction):
break
else:
self.error('thread %s still alive after 10 seconds, will close '
'session anyway', thread)
- self._free_thread_pool(thread, pool, force_close=True)
+ self._free_thread_cnxset(thread, cnxset, force_close=True)
self.rollback()
del self.__threaddata
del self._tx_data
@@ -954,9 +1074,16 @@
self._threaddata.pending_operations = []
return self._threaddata.pending_operations
+ @property
+ def pruned_hooks_cache(self):
+ try:
+ return self._threaddata.pruned_hooks_cache
+ except AttributeError:
+ self._threaddata.pruned_hooks_cache = {}
+ return self._threaddata.pruned_hooks_cache
+
def add_operation(self, operation, index=None):
- """add an observer"""
- assert self.commit_state != 'commit'
+ """add an operation"""
if index is None:
self.pending_operations.append(operation)
else:
@@ -1063,6 +1190,19 @@
# deprecated ###############################################################
+ @property
+ @deprecated("[3.13] use .cnxset attribute instead of .pool")
+ def pool(self):
+ return self.cnxset
+
+ @deprecated("[3.13] use .set_cnxset() method instead of .set_pool()")
+ def set_pool(self):
+ return self.set_cnxset()
+
+ @deprecated("[3.13] use .free_cnxset() method instead of .reset_pool()")
+ def reset_pool(self):
+ return self.free_cnxset()
+
@deprecated("[3.7] execute is now unsafe by default in hooks/operation. You"
" can also control security with the security_enabled context "
"manager")
@@ -1128,12 +1268,12 @@
self.disable_hook_categories('integrity')
@property
- def pool(self):
- """connections pool, set according to transaction mode for each query"""
+ def cnxset(self):
+ """connections set, set according to transaction mode for each query"""
if self.repo.shutting_down:
- self.reset_pool(True)
+ self.free_cnxset(True)
raise Exception('repository is shutting down')
- return getattr(self._threaddata, 'pool', None)
+ return getattr(self._threaddata, 'cnxset', None)
class InternalManager(object):
--- a/server/sources/__init__.py Wed Jun 08 15:11:45 2011 +0200
+++ b/server/sources/__init__.py Wed Jun 08 17:08:00 2011 +0200
@@ -230,23 +230,23 @@
def check_connection(self, cnx):
"""Check connection validity, return None if the connection is still
- valid else a new connection (called when the pool using the given
- connection is being attached to a session). Do nothing by default.
+ valid else a new connection (called when the connections set holding the
+ given connection is being attached to a session). Do nothing by default.
"""
pass
- def close_pool_connections(self):
- for pool in self.repo.pools:
- pool._cursors.pop(self.uri, None)
- pool.source_cnxs[self.uri][1].close()
+ def close_source_connections(self):
+ for cnxset in self.repo.cnxsets:
+ cnxset._cursors.pop(self.uri, None)
+ cnxset.source_cnxs[self.uri][1].close()
- def open_pool_connections(self):
- for pool in self.repo.pools:
- pool.source_cnxs[self.uri] = (self, self.get_connection())
+ def open_source_connections(self):
+ for cnxset in self.repo.cnxsets:
+ cnxset.source_cnxs[self.uri] = (self, self.get_connection())
- def pool_reset(self, cnx):
- """the pool using the given connection is being reseted from its current
- attached session
+ def cnxset_freed(self, cnx):
+ """the connections set holding the given connection is being reseted
+ from its current attached session.
do nothing by default
"""
@@ -404,7 +404,7 @@
.executemany().
"""
res = self.syntax_tree_search(session, union, args, varmap=varmap)
- session.pool.source('system').manual_insert(res, table, session)
+ session.cnxset.source('system').manual_insert(res, table, session)
# write modification api ###################################################
# read-only sources don't have to implement methods below
--- a/server/sources/datafeed.py Wed Jun 08 15:11:45 2011 +0200
+++ b/server/sources/datafeed.py Wed Jun 08 17:08:00 2011 +0200
@@ -18,13 +18,21 @@
"""datafeed sources: copy data from an external data stream into the system
database
"""
+from __future__ import with_statement
+
+import urllib2
+import StringIO
from datetime import datetime, timedelta
from base64 import b64decode
+from cookielib import CookieJar
-from cubicweb import RegistryNotFound, ObjectNotFound, ValidationError
+from lxml import etree
+
+from cubicweb import RegistryNotFound, ObjectNotFound, ValidationError, UnknownEid
from cubicweb.server.sources import AbstractSource
from cubicweb.appobject import AppObject
+
class DataFeedSource(AbstractSource):
copy_based_source = True
@@ -120,27 +128,50 @@
return False
return datetime.utcnow() < (self.latest_retrieval + self.synchro_interval)
+ def update_latest_retrieval(self, session):
+ self.latest_retrieval = datetime.utcnow()
+ session.execute('SET X latest_retrieval %(date)s WHERE X eid %(x)s',
+ {'x': self.eid, 'date': self.latest_retrieval})
+
+ def acquire_synchronization_lock(self, session):
+ # XXX race condition until WHERE of SET queries is executed using
+ # 'SELECT FOR UPDATE'
+ if not session.execute('SET X synchronizing TRUE WHERE X eid %(x)s, X synchronizing FALSE',
+ {'x': self.eid}):
+ self.error('concurrent synchronization detected, skipping pull')
+ session.commit(free_cnxset=False)
+ return False
+ session.commit(free_cnxset=False)
+ return True
+
+ def release_synchronization_lock(self, session):
+ session.execute('SET X synchronizing FALSE WHERE X eid %(x)s',
+ {'x': self.eid})
+ session.commit()
+
def pull_data(self, session, force=False, raise_on_error=False):
+ """Launch synchronization of the source if needed.
+
+ This method is responsible for handling commit/rollback on the given
+ session.
+ """
if not force and self.fresh():
return {}
+ if not self.acquire_synchronization_lock(session):
+ return {}
+ try:
+ with session.transaction(free_cnxset=False):
+ return self._pull_data(session, force, raise_on_error)
+ finally:
+ self.release_synchronization_lock(session)
+
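The datafeed tests further down in this changeset drive the method exactly this way; a minimal usage sketch, assuming `dfsource` is a configured `DataFeedSource`:

    session.set_cnxset()  # pull_data expects the session to hold a connections set
    stats = dfsource.pull_data(session, force=True, raise_on_error=False)
    print stats           # e.g. {'created': set([...]), 'updated': set([...])}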
+ def _pull_data(self, session, force=False, raise_on_error=False):
if self.config['delete-entities']:
myuris = self.source_cwuris(session)
else:
myuris = None
parser = self._get_parser(session, sourceuris=myuris)
- error = False
- self.info('pulling data for source %s', self.uri)
- for url in self.urls:
- try:
- if parser.process(url):
- error = True
- except IOError, exc:
- if raise_on_error:
- raise
- self.error('could not pull data while processing %s: %s',
- url, exc)
- error = True
- if error:
+ if self.process_urls(parser, self.urls, raise_on_error):
self.warning("some error occured, don't attempt to delete entities")
elif self.config['delete-entities'] and myuris:
byetype = {}
@@ -150,11 +181,24 @@
for etype, eids in byetype.iteritems():
session.execute('DELETE %s X WHERE X eid IN (%s)'
% (etype, ','.join(eids)))
- self.latest_retrieval = datetime.utcnow()
- session.execute('SET X latest_retrieval %(date)s WHERE X eid %(x)s',
- {'x': self.eid, 'date': self.latest_retrieval})
+ self.update_latest_retrieval(session)
return parser.stats
+ def process_urls(self, parser, urls, raise_on_error=False):
+ error = False
+ for url in urls:
+ self.info('pulling data from %s', url)
+ try:
+ if parser.process(url, raise_on_error):
+ error = True
+ except IOError, exc:
+ if raise_on_error:
+ raise
+ self.error('could not pull data while processing %s: %s',
+ url, exc)
+ error = True
+ return error
+
def before_entity_insertion(self, session, lid, etype, eid, sourceparams):
"""called by the repository when an eid has been attributed for an
entity stored here but the entity has not been inserted in the system
@@ -194,8 +238,8 @@
class DataFeedParser(AppObject):
__registry__ = 'parsers'
- def __init__(self, session, source, sourceuris=None):
- self._cw = session
+ def __init__(self, session, source, sourceuris=None, **kwargs):
+ super(DataFeedParser, self).__init__(session, **kwargs)
self.source = source
self.sourceuris = sourceuris
self.stats = {'created': set(),
@@ -212,14 +256,28 @@
raise ValidationError(schemacfg.eid, {None: msg})
def extid2entity(self, uri, etype, **sourceparams):
+ """return an entity for the given uri. May return None if it should be
+ skipped
+ """
sourceparams['parser'] = self
eid = self.source.extid2eid(str(uri), etype, self._cw,
sourceparams=sourceparams)
+ if eid < 0:
+ # entity has been moved away from its original source
+ #
+ # Don't give etype to entity_from_eid so we get UnknownEid if the
+ # entity has been removed
+ try:
+ entity = self._cw.entity_from_eid(-eid)
+ except UnknownEid:
+ return None
+ self.notify_updated(entity) # avoid later update from the source's data
+ return entity
if self.sourceuris is not None:
self.sourceuris.pop(str(uri), None)
return self._cw.entity_from_eid(eid, etype)
- def process(self, url):
+ def process(self, url, partialcommit=True):
"""main callback: process the url"""
raise NotImplementedError
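Since `extid2entity` may now return `None` for an entity that was moved away from the source, concrete parsers have to guard their `process_item`. A hedged sketch mirroring the `cw.entityxml` parser below:

    def process_item(self, item, rels):
        entity = self.extid2entity(str(item.pop('cwuri')), item.pop('cwtype'),
                                   item=item)
        if entity is None:
            return None  # moved to another source, nothing to update here
        # ... go on with attribute and relation handling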
@@ -237,3 +295,64 @@
def notify_updated(self, entity):
return self.stats['updated'].add(entity.eid)
+
+
+class DataFeedXMLParser(DataFeedParser):
+
+ def process(self, url, raise_on_error=False, partialcommit=True):
+ """IDataFeedParser main entry point"""
+ try:
+ parsed = self.parse(url)
+ except Exception, ex:
+ self.source.error(str(ex))
+ return True
+ error = False
+ for args in parsed:
+ try:
+ self.process_item(*args)
+ if partialcommit:
+ # commit+set_cnxset instead of commit(free_cnxset=False) to give
+ # other threads a chance to get our connections set
+ self._cw.commit()
+ self._cw.set_cnxset()
+ except ValidationError, exc:
+ if raise_on_error:
+ raise
+ if partialcommit:
+ self.source.error('Skipping %s because of validation error %s' % (args, exc))
+ self._cw.rollback()
+ self._cw.set_cnxset()
+ error = True
+ else:
+ raise
+ return error
+
+ def parse(self, url):
+ if url.startswith('http'):
+ from cubicweb.sobjects.parsers import HOST_MAPPING
+ for mappedurl in HOST_MAPPING:
+ if url.startswith(mappedurl):
+ url = url.replace(mappedurl, HOST_MAPPING[mappedurl], 1)
+ break
+ self.source.info('GET %s', url)
+ stream = _OPENER.open(url)
+ elif url.startswith('file://'):
+ stream = open(url[7:])
+ else:
+ stream = StringIO.StringIO(url)
+ return self.parse_etree(etree.parse(stream).getroot())
+
+ def parse_etree(self, document):
+ return [(document,)]
+
+ def process_item(self, *args):
+ raise NotImplementedError
+
+# use a cookie enabled opener to use session cookie if any
+_OPENER = urllib2.build_opener()
+try:
+ from logilab.common import urllib2ext
+ _OPENER.add_handler(urllib2ext.HTTPGssapiAuthHandler())
+except ImportError: # python-kerberos not available
+ pass
+_OPENER.add_handler(urllib2.HTTPCookieProcessor(CookieJar()))
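To make the new base class concrete: a minimal hypothetical subclass, where `parse_etree` turns the fetched document into one tuple of arguments per item and `process_item` consumes them under the partial-commit scheme shown above.

    from cubicweb.server.sources import datafeed

    class CardFeedParser(datafeed.DataFeedXMLParser):
        __regid__ = 'demo.cardparser'  # hypothetical registry id
        def parse_etree(self, document):
            # one argument tuple per <card> element of the feed
            return [(node,) for node in document.findall('card')]
        def process_item(self, node):
            self.extid2entity(node.get('uri'), 'Card',
                              item={'title': unicode(node.get('title'))})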
--- a/server/sources/extlite.py Wed Jun 08 15:11:45 2011 +0200
+++ b/server/sources/extlite.py Wed Jun 08 17:08:00 2011 +0200
@@ -102,19 +102,19 @@
def backup(self, backupfile, confirm):
"""method called to create a backup of the source's data"""
- self.close_pool_connections()
+ self.close_source_connections()
try:
self.sqladapter.backup_to_file(backupfile, confirm)
finally:
- self.open_pool_connections()
+ self.open_source_connections()
def restore(self, backupfile, confirm, drop):
"""method called to restore a backup of source's data"""
- self.close_pool_connections()
+ self.close_source_connections()
try:
self.sqladapter.restore_from_file(backupfile, confirm, drop)
finally:
- self.open_pool_connections()
+ self.open_source_connections()
@property
def _sqlcnx(self):
@@ -174,15 +174,15 @@
def check_connection(self, cnx):
"""check connection validity, return None if the connection is still valid
- else a new connection (called when the pool using the given connection is
+ else a new connection (called when the connections set holding the given connection is
being attached to a session)
always return the connection to reset eventually cached cursor
"""
return cnx
- def pool_reset(self, cnx):
- """the pool using the given connection is being reseted from its current
+ def cnxset_freed(self, cnx):
+ """the connections set holding the given connection is being freed from its current
attached session: release the connection lock if the connection wrapper
has a connection set
"""
@@ -286,7 +286,7 @@
"""
if server.DEBUG:
print 'exec', query, args
- cursor = session.pool[self.uri]
+ cursor = session.cnxset[self.uri]
try:
# str(query) to avoid error if it's an unicode string
cursor.execute(str(query), args)
@@ -294,7 +294,7 @@
self.critical("sql: %r\n args: %s\ndbms message: %r",
query, args, ex.args[0])
try:
- session.pool.connection(self.uri).rollback()
+ session.cnxset.connection(self.uri).rollback()
self.critical('transaction has been rolled back')
except:
pass
--- a/server/sources/ldapuser.py Wed Jun 08 15:11:45 2011 +0200
+++ b/server/sources/ldapuser.py Wed Jun 08 17:08:00 2011 +0200
@@ -310,7 +310,11 @@
except Exception:
self.error('while trying to authenticate %s', user, exc_info=True)
raise AuthenticationError()
- return self.extid2eid(user['dn'], 'CWUser', session)
+ eid = self.extid2eid(user['dn'], 'CWUser', session)
+ if eid < 0:
+ # user has been moved away from this source
+ raise AuthenticationError()
+ return eid
def ldap_name(self, var):
if var.stinfo['relations']:
@@ -392,7 +396,7 @@
break
assert mainvars, rqlst
columns, globtransforms = self.prepare_columns(mainvars, rqlst)
- eidfilters = []
+ eidfilters = [lambda x: x > 0]
allresults = []
generator = RQL2LDAPFilter(self, session, args, mainvars)
for mainvar in mainvars:
@@ -524,9 +528,9 @@
"""make an ldap query"""
self.debug('ldap search %s %s %s %s %s', self.uri, base, scope,
searchstr, list(attrs))
- # XXX for now, we do not have connection pool support for LDAP, so
+ # XXX for now, we do not have connections set support for LDAP, so
# this is always self._conn
- cnx = session.pool.connection(self.uri).cnx
+ cnx = session.cnxset.connection(self.uri).cnx
try:
res = cnx.search_s(base, scope, searchstr, attrs)
except ldap.PARTIAL_RESULTS:
--- a/server/sources/native.py Wed Jun 08 15:11:45 2011 +0200
+++ b/server/sources/native.py Wed Jun 08 17:08:00 2011 +0200
@@ -304,9 +304,9 @@
self.dbhelper.dbname = abspath(self.dbhelper.dbname)
self.get_connection = lambda: ConnectionWrapper(self)
self.check_connection = lambda cnx: cnx
- def pool_reset(cnx):
+ def cnxset_freed(cnx):
cnx.close()
- self.pool_reset = pool_reset
+ self.cnxset_freed = cnxset_freed
if self.dbdriver == 'sqlite':
self._create_eid = None
self.create_eid = self._create_eid_sqlite
@@ -346,21 +346,21 @@
"""execute the query and return its result"""
return self.process_result(self.doexec(session, sql, args))
- def init_creating(self, pool=None):
+ def init_creating(self, cnxset=None):
# check full text index availability
if self.do_fti:
- if pool is None:
- _pool = self.repo._get_pool()
- _pool.pool_set()
+ if cnxset is None:
+ _cnxset = self.repo._get_cnxset()
+ _cnxset.cnxset_set()
else:
- _pool = pool
- if not self.dbhelper.has_fti_table(_pool['system']):
+ _cnxset = cnxset
+ if not self.dbhelper.has_fti_table(_cnxset['system']):
if not self.repo.config.creating:
self.critical('no text index table')
self.do_fti = False
- if pool is None:
- _pool.pool_reset()
- self.repo._free_pool(_pool)
+ if cnxset is None:
+ _cnxset.cnxset_freed()
+ self.repo._free_cnxset(_cnxset)
def backup(self, backupfile, confirm, format='native'):
"""method called to create a backup of the source's data"""
@@ -368,25 +368,25 @@
self.repo.fill_schema()
self.set_schema(self.repo.schema)
helper = DatabaseIndependentBackupRestore(self)
- self.close_pool_connections()
+ self.close_source_connections()
try:
helper.backup(backupfile)
finally:
- self.open_pool_connections()
+ self.open_source_connections()
elif format == 'native':
- self.close_pool_connections()
+ self.close_source_connections()
try:
self.backup_to_file(backupfile, confirm)
finally:
- self.open_pool_connections()
+ self.open_source_connections()
else:
raise ValueError('Unknown format %r' % format)
def restore(self, backupfile, confirm, drop, format='native'):
"""method called to restore a backup of source's data"""
- if self.repo.config.open_connections_pools:
- self.close_pool_connections()
+ if self.repo.config.init_cnxset_pool:
+ self.close_source_connections()
try:
if format == 'portable':
helper = DatabaseIndependentBackupRestore(self)
@@ -396,12 +396,12 @@
else:
raise ValueError('Unknown format %r' % format)
finally:
- if self.repo.config.open_connections_pools:
- self.open_pool_connections()
+ if self.repo.config.init_cnxset_pool:
+ self.open_source_connections()
def init(self, activated, source_entity):
- self.init_creating(source_entity._cw.pool)
+ self.init_creating(source_entity._cw.cnxset)
def shutdown(self):
if self._eid_creation_cnx:
@@ -523,13 +523,13 @@
raise
# FIXME: better detection of disconnection problems
self.warning("trying to reconnect")
- session.pool.reconnect(self)
+ session.cnxset.reconnect(self)
cursor = self.doexec(session, sql, args)
except (self.DbapiError,), exc:
# We get this one with pyodbc and SQL Server when connection was reset
if exc.args[0] == '08S01' and session.mode != 'write':
self.warning("trying to reconnect")
- session.pool.reconnect(self)
+ session.cnxset.reconnect(self)
cursor = self.doexec(session, sql, args)
else:
raise
@@ -718,9 +718,9 @@
"""Execute a query.
it's a function just so that it shows up in profiling
"""
- cursor = session.pool[self.uri]
+ cursor = session.cnxset[self.uri]
if server.DEBUG & server.DBG_SQL:
- cnx = session.pool.connection(self.uri)
+ cnx = session.cnxset.connection(self.uri)
# getattr to get the actual connection if cnx is a ConnectionWrapper
# instance
print 'exec', query, args, getattr(cnx, '_cnx', cnx)
@@ -735,7 +735,7 @@
query, args, ex.args[0])
if rollback:
try:
- session.pool.connection(self.uri).rollback()
+ session.cnxset.connection(self.uri).rollback()
if self.repo.config.mode != 'test':
self.critical('transaction has been rolled back')
except:
@@ -764,7 +764,7 @@
"""
if server.DEBUG & server.DBG_SQL:
print 'execmany', query, 'with', len(args), 'arguments'
- cursor = session.pool[self.uri]
+ cursor = session.cnxset[self.uri]
try:
# str(query) to avoid error if it's an unicode string
cursor.executemany(str(query), args)
@@ -775,7 +775,7 @@
self.critical("sql many: %r\n args: %s\ndbms message: %r",
query, args, ex.args[0])
try:
- session.pool.connection(self.uri).rollback()
+ session.cnxset.connection(self.uri).rollback()
if self.repo.config.mode != 'test':
self.critical('transaction has been rolled back')
except:
@@ -793,7 +793,7 @@
self.error("backend can't alter %s.%s to %s%s", table, column, coltype,
not allownull and 'NOT NULL' or '')
return
- self.dbhelper.change_col_type(LogCursor(session.pool[self.uri]),
+ self.dbhelper.change_col_type(LogCursor(session.cnxset[self.uri]),
table, column, coltype, allownull)
self.info('altered %s.%s: now %s%s', table, column, coltype,
not allownull and 'NOT NULL' or '')
@@ -808,7 +808,7 @@
return
table, column = rdef_table_column(rdef)
coltype, allownull = rdef_physical_info(self.dbhelper, rdef)
- self.dbhelper.set_null_allowed(LogCursor(session.pool[self.uri]),
+ self.dbhelper.set_null_allowed(LogCursor(session.cnxset[self.uri]),
table, column, coltype, allownull)
def update_rdef_indexed(self, session, rdef):
@@ -826,11 +826,11 @@
self.drop_index(session, table, column, unique=True)
def create_index(self, session, table, column, unique=False):
- cursor = LogCursor(session.pool[self.uri])
+ cursor = LogCursor(session.cnxset[self.uri])
self.dbhelper.create_index(cursor, table, column, unique)
def drop_index(self, session, table, column, unique=False):
- cursor = LogCursor(session.pool[self.uri])
+ cursor = LogCursor(session.cnxset[self.uri])
self.dbhelper.drop_index(cursor, table, column, unique)
# system source interface #################################################
@@ -841,7 +841,7 @@
try:
res = self.doexec(session, sql).fetchone()
except:
- assert session.pool, 'session has no pool set'
+ assert session.cnxset, 'session has no connections set'
raise UnknownEid(eid)
if res is None:
raise UnknownEid(eid)
@@ -1135,7 +1135,7 @@
important note: while undoing a transaction, only hooks in the
'integrity', 'activeintegrity' and 'undo' categories are called.
"""
- # set mode so pool isn't released subsquently until commit/rollback
+ # set mode so the connections set isn't released until commit/rollback
session.mode = 'write'
errors = []
session.transaction_data['undoing_uuid'] = txuuid
@@ -1380,7 +1380,7 @@
def fti_unindex_entities(self, session, entities):
"""remove text content for entities from the full text index
"""
- cursor = session.pool['system']
+ cursor = session.cnxset['system']
cursor_unindex_object = self.dbhelper.cursor_unindex_object
try:
for entity in entities:
@@ -1393,7 +1393,7 @@
"""add text content of created/modified entities to the full text index
"""
cursor_index_object = self.dbhelper.cursor_index_object
- cursor = session.pool['system']
+ cursor = session.cnxset['system']
try:
# use cursor_index_object, not cursor_reindex_object since
# unindexing done in the FTIndexEntityOp
--- a/server/sources/pyrorql.py Wed Jun 08 15:11:45 2011 +0200
+++ b/server/sources/pyrorql.py Wed Jun 08 17:08:00 2011 +0200
@@ -234,10 +234,12 @@
etype, dexturi, dextid = cnx.describe(extid)
if dexturi == 'system' or not (
dexturi in self.repo.sources_by_uri or self._skip_externals):
- return self.repo.extid2eid(self, str(extid), etype, session), True
- if dexturi in self.repo.sources_by_uri:
+ eid = self.repo.extid2eid(self, str(extid), etype, session)
+ if eid > 0:
+ return eid, True
+ elif dexturi in self.repo.sources_by_uri:
source = self.repo.sources_by_uri[dexturi]
- cnx = session.pool.connection(source.uri)
+ cnx = session.cnxset.connection(source.uri)
eid = source.local_eid(cnx, dextid, session)[0]
return eid, False
return None, None
@@ -322,7 +324,7 @@
else a new connection
"""
# we have to transfer manually thread ownership. This can be done safely
- # since the pool to which belong the connection is affected to one
+ # since the connections set holding the connection is assigned to one
# session/thread and can't be called simultaneously
try:
cnx._repo._transferThread(threading.currentThread())
@@ -359,7 +361,7 @@
if not args is None:
args = args.copy()
# get cached cursor anyway
- cu = session.pool[self.uri]
+ cu = session.cnxset[self.uri]
if cu is None:
# this is a ConnectionWrapper instance
msg = session._("can't connect to source %s, some data may be missing")
@@ -390,7 +392,7 @@
or uidtype(union, i, etype, args)):
needtranslation.append(i)
if needtranslation:
- cnx = session.pool.connection(self.uri)
+ cnx = session.cnxset.connection(self.uri)
for rowindex in xrange(rset.rowcount - 1, -1, -1):
row = rows[rowindex]
localrow = False
@@ -434,37 +436,37 @@
def update_entity(self, session, entity):
"""update an entity in the source"""
relations, kwargs = self._entity_relations_and_kwargs(session, entity)
- cu = session.pool[self.uri]
+ cu = session.cnxset[self.uri]
cu.execute('SET %s WHERE X eid %%(x)s' % ','.join(relations), kwargs)
self._query_cache.clear()
- entity.clear_all_caches()
+ entity.cw_clear_all_caches()
def delete_entity(self, session, entity):
"""delete an entity from the source"""
- cu = session.pool[self.uri]
+ cu = session.cnxset[self.uri]
cu.execute('DELETE %s X WHERE X eid %%(x)s' % entity.__regid__,
{'x': self.eid2extid(entity.eid, session)})
self._query_cache.clear()
def add_relation(self, session, subject, rtype, object):
"""add a relation to the source"""
- cu = session.pool[self.uri]
+ cu = session.cnxset[self.uri]
cu.execute('SET X %s Y WHERE X eid %%(x)s, Y eid %%(y)s' % rtype,
{'x': self.eid2extid(subject, session),
'y': self.eid2extid(object, session)})
self._query_cache.clear()
- session.entity_from_eid(subject).clear_all_caches()
- session.entity_from_eid(object).clear_all_caches()
+ session.entity_from_eid(subject).cw_clear_all_caches()
+ session.entity_from_eid(object).cw_clear_all_caches()
def delete_relation(self, session, subject, rtype, object):
"""delete a relation from the source"""
- cu = session.pool[self.uri]
+ cu = session.cnxset[self.uri]
cu.execute('DELETE X %s Y WHERE X eid %%(x)s, Y eid %%(y)s' % rtype,
{'x': self.eid2extid(subject, session),
'y': self.eid2extid(object, session)})
self._query_cache.clear()
- session.entity_from_eid(subject).clear_all_caches()
- session.entity_from_eid(object).clear_all_caches()
+ session.entity_from_eid(subject).cw_clear_all_caches()
+ session.entity_from_eid(object).cw_clear_all_caches()
class RQL2RQL(object):
--- a/server/sources/rql2sql.py Wed Jun 08 15:11:45 2011 +0200
+++ b/server/sources/rql2sql.py Wed Jun 08 17:08:00 2011 +0200
@@ -1366,6 +1366,8 @@
operator = ' LIKE '
else:
operator = ' %s ' % operator
+ elif operator == 'REGEXP':
+ return ' %s' % self.dbhelper.sql_regexp_match_expression(rhs.accept(self))
elif (operator == '=' and isinstance(rhs, Constant)
and rhs.eval(self._args) is None):
if lhs is None:
@@ -1416,6 +1418,8 @@
if constant.type is None:
return 'NULL'
value = constant.value
+ if constant.type == 'etype':
+ return value
if constant.type == 'Int' and isinstance(constant.parent, SortTerm):
return value
if constant.type in ('Date', 'Datetime'):
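For context on the REGEXP hunk above: the operator surfaces in RQL as in the querier tests later in this changeset, while the generated SQL is backend-specific (`~` on PostgreSQL, `REGEXP` on sqlite and mysql, unsupported on sqlserver). A usage sketch:

    # matches only names starting with 'b'
    rset = session.execute('Any X WHERE X is Personne, X nom REGEXP "^b"')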
--- a/server/sources/storages.py Wed Jun 08 15:11:45 2011 +0200
+++ b/server/sources/storages.py Wed Jun 08 17:08:00 2011 +0200
@@ -204,7 +204,7 @@
"""return the current fs_path of the tribute.
Return None is the attr is not stored yet."""
- sysource = entity._cw.pool.source('system')
+ sysource = entity._cw.cnxset.source('system')
cu = sysource.doexec(entity._cw,
'SELECT cw_%s FROM cw_%s WHERE cw_eid=%s' % (
attr, entity.__regid__, entity.eid))
--- a/server/test/unittest_datafeed.py Wed Jun 08 15:11:45 2011 +0200
+++ b/server/test/unittest_datafeed.py Wed Jun 08 17:08:00 2011 +0200
@@ -39,7 +39,7 @@
class AParser(datafeed.DataFeedParser):
__regid__ = 'testparser'
- def process(self, url):
+ def process(self, url, raise_on_error=False):
entity = self.extid2entity('http://www.cubicweb.org/', 'Card',
item={'title': u'cubicweb.org',
'content': u'the cw web site'})
--- a/server/test/unittest_hook.py Wed Jun 08 15:11:45 2011 +0200
+++ b/server/test/unittest_hook.py Wed Jun 08 17:08:00 2011 +0200
@@ -23,7 +23,7 @@
from logilab.common.testlib import TestCase, unittest_main, mock_object
-from cubicweb.devtools import TestServerConfiguration
+from cubicweb.devtools import TestServerConfiguration, fake
from cubicweb.devtools.testlib import CubicWebTC
from cubicweb.server import hook
from cubicweb.hooks import integrity, syncschema
@@ -124,10 +124,8 @@
def test_call_hook(self):
self.o.register(AddAnyHook)
dis = set()
- cw = mock_object(vreg=self.vreg,
- set_read_security=lambda *a,**k: None,
- set_write_security=lambda *a,**k: None,
- is_hook_activated=lambda x, cls: cls.category not in dis)
+ cw = fake.FakeSession()
+ cw.is_hook_activated = lambda cls: cls.category not in dis
self.assertRaises(HookCalled,
self.o.call_hooks, 'before_add_entity', cw)
dis.add('cat1')
@@ -203,10 +201,10 @@
# self.assertEqual(self.called, [(1, 'concerne', 2), (3, 'concerne', 4)])
-# def _before_relation_hook(self, pool, subject, r_type, object):
+# def _before_relation_hook(self, cnxset, subject, r_type, object):
# self.called.append((subject, r_type, object))
-# def _after_relation_hook(self, pool, subject, r_type, object):
+# def _after_relation_hook(self, cnxset, subject, r_type, object):
# self.called.append((subject, r_type, object))
--- a/server/test/unittest_ldapuser.py Wed Jun 08 15:11:45 2011 +0200
+++ b/server/test/unittest_ldapuser.py Wed Jun 08 17:08:00 2011 +0200
@@ -137,7 +137,7 @@
def test_authenticate(self):
source = self.repo.sources_by_uri['ldapuser']
- self.session.set_pool()
+ self.session.set_cnxset()
self.assertRaises(AuthenticationError,
source.authenticate, self.session, 'toto', 'toto')
@@ -239,7 +239,7 @@
iworkflowable.fire_transition('deactivate')
try:
cnx.commit()
- adim.clear_all_caches()
+ adim.cw_clear_all_caches()
self.assertEqual(adim.in_state[0].name, 'deactivated')
trinfo = iworkflowable.latest_trinfo()
self.assertEqual(trinfo.owned_by[0].login, SYT)
@@ -265,7 +265,7 @@
self.failUnless(self.sexecute('Any X,Y WHERE X login %(syt)s, Y login "cochon"', {'syt': SYT}))
def test_exists1(self):
- self.session.set_pool()
+ self.session.set_cnxset()
self.session.create_entity('CWGroup', name=u'bougloup1')
self.session.create_entity('CWGroup', name=u'bougloup2')
self.sexecute('SET U in_group G WHERE G name ~= "bougloup%", U login "admin"')
@@ -378,6 +378,23 @@
rset = cu.execute('Any F WHERE X has_text "iaminguestsgrouponly", X firstname F')
self.assertEqual(rset.rows, [[None]])
+ def test_copy_to_system_source(self):
+ eid = self.sexecute('CWUser X WHERE X login %(login)s', {'login': SYT})[0][0]
+ self.sexecute('SET X cw_source S WHERE X eid %(x)s, S name "system"', {'x': eid})
+ self.commit()
+ rset = self.sexecute('CWUser X WHERE X login %(login)s', {'login': SYT})
+ self.assertEqual(len(rset), 1)
+ e = rset.get_entity(0, 0)
+ self.assertEqual(e.eid, eid)
+ self.assertEqual(e.cw_metainformation(), {'source': {'type': u'native', 'uri': u'system'},
+ 'type': 'CWUser',
+ 'extid': None})
+ self.assertEqual(e.cw_source[0].name, 'system')
+ source = self.repo.sources_by_uri['ldapuser']
+ source.synchronize()
+ rset = self.sexecute('CWUser X WHERE X login %(login)s', {'login': SYT})
+ self.assertEqual(len(rset), 1)
+
def test_nonregr1(self):
self.sexecute('Any X,AA ORDERBY AA DESC WHERE E eid %(x)s, E owned_by X, '
'X modification_date AA',
@@ -465,8 +482,8 @@
self._schema = repo.schema
super(RQL2LDAPFilterTC, self).setUp()
ldapsource = repo.sources[-1]
- self.pool = repo._get_pool()
- session = mock_object(pool=self.pool)
+ self.cnxset = repo._get_cnxset()
+ session = mock_object(cnxset=self.cnxset)
self.o = RQL2LDAPFilter(ldapsource, session)
self.ldapclasses = ''.join(ldapsource.base_filters)
--- a/server/test/unittest_migractions.py Wed Jun 08 15:11:45 2011 +0200
+++ b/server/test/unittest_migractions.py Wed Jun 08 17:08:00 2011 +0200
@@ -338,7 +338,7 @@
@tag('longrun')
def test_sync_schema_props_perms(self):
cursor = self.mh.session
- cursor.set_pool()
+ cursor.set_cnxset()
nbrqlexpr_start = cursor.execute('Any COUNT(X) WHERE X is RQLExpression')[0][0]
migrschema['titre'].rdefs[('Personne', 'String')].order = 7
migrschema['adel'].rdefs[('Personne', 'String')].order = 6
--- a/server/test/unittest_querier.py Wed Jun 08 15:11:45 2011 +0200
+++ b/server/test/unittest_querier.py Wed Jun 08 17:08:00 2011 +0200
@@ -311,6 +311,14 @@
seid = self.execute('State X WHERE X name "deactivated"')[0][0]
rset = self.execute('Any U,L,S GROUPBY U,L,S WHERE X in_state S, U login L, S eid %s' % seid)
+ def test_select_groupby_funccall(self):
+ rset = self.execute('Any YEAR(CD), COUNT(X) GROUPBY YEAR(CD) WHERE X is CWUser, X creation_date CD')
+ self.assertListEqual(rset.rows, [[date.today().year, 2]])
+
+ def test_select_groupby_colnumber(self):
+ rset = self.execute('Any YEAR(CD), COUNT(X) GROUPBY 1 WHERE X is CWUser, X creation_date CD')
+ self.assertListEqual(rset.rows, [[date.today().year, 2]])
+
def test_select_complex_orderby(self):
rset1 = self.execute('Any N ORDERBY N WHERE X name N')
self.assertEqual(sorted(rset1.rows), rset1.rows)
@@ -443,6 +451,15 @@
self.assertEqual(rset.rows[0][0], result)
self.assertEqual(rset.description, [('Int',)])
+ def test_regexp_based_pattern_matching(self):
+ peid1 = self.execute("INSERT Personne X: X nom 'bidule'")[0][0]
+ peid2 = self.execute("INSERT Personne X: X nom 'cidule'")[0][0]
+ rset = self.execute('Any X WHERE X is Personne, X nom REGEXP "^b"')
+ self.assertEqual(len(rset.rows), 1, rset.rows)
+ self.assertEqual(rset.rows[0][0], peid1)
+ rset = self.execute('Any X WHERE X is Personne, X nom REGEXP "idu"')
+ self.assertEqual(len(rset.rows), 2, rset.rows)
+
def test_select_aggregat_count(self):
rset = self.execute('Any COUNT(X)')
self.assertEqual(len(rset.rows), 1)
@@ -1099,7 +1116,7 @@
#'INSERT Email X: X messageid "<1234>", X subject "test", X sender Y, X recipients Y'
eeid, = self.o.execute(s, 'INSERT Email X: X messageid "<1234>", X subject "test", X sender Y, X recipients Y WHERE Y is EmailAddress')[0]
self.o.execute(s, "DELETE Email X")
- sqlc = s.pool['system']
+ sqlc = s.cnxset['system']
sqlc.execute('SELECT * FROM recipients_relation')
self.assertEqual(len(sqlc.fetchall()), 0)
sqlc.execute('SELECT * FROM owned_by_relation WHERE eid_from=%s'%eeid)
@@ -1212,7 +1229,7 @@
self.assertEqual(rset.description, [('CWUser',)])
self.assertRaises(Unauthorized,
self.execute, "Any P WHERE X is CWUser, X login 'bob', X upassword P")
- cursor = self.pool['system']
+ cursor = self.cnxset['system']
cursor.execute("SELECT %supassword from %sCWUser WHERE %slogin='bob'"
% (SQL_PREFIX, SQL_PREFIX, SQL_PREFIX))
passwd = str(cursor.fetchone()[0])
@@ -1227,7 +1244,7 @@
self.assertEqual(rset.description[0][0], 'CWUser')
rset = self.execute("SET X upassword %(pwd)s WHERE X is CWUser, X login 'bob'",
{'pwd': 'tutu'})
- cursor = self.pool['system']
+ cursor = self.cnxset['system']
cursor.execute("SELECT %supassword from %sCWUser WHERE %slogin='bob'"
% (SQL_PREFIX, SQL_PREFIX, SQL_PREFIX))
passwd = str(cursor.fetchone()[0])
--- a/server/test/unittest_repository.py Wed Jun 08 15:11:45 2011 +0200
+++ b/server/test/unittest_repository.py Wed Jun 08 17:08:00 2011 +0200
@@ -24,6 +24,7 @@
import sys
import threading
import time
+import logging
from copy import deepcopy
from datetime import datetime
@@ -62,7 +63,7 @@
table = SQL_PREFIX + 'CWEType'
namecol = SQL_PREFIX + 'name'
finalcol = SQL_PREFIX + 'final'
- self.session.set_pool()
+ self.session.set_cnxset()
cu = self.session.system_sql('SELECT %s FROM %s WHERE %s is NULL' % (
namecol, table, finalcol))
self.assertEqual(cu.fetchall(), [])
@@ -259,7 +260,7 @@
cnxid = repo.connect(self.admlogin, password=self.admpassword)
# rollback state change which trigger TrInfo insertion
session = repo._get_session(cnxid)
- session.set_pool()
+ session.set_cnxset()
user = session.user
user.cw_adapt_to('IWorkflowable').fire_transition('deactivate')
rset = repo.execute(cnxid, 'TrInfo T WHERE T wf_info_for X, X eid %(x)s', {'x': user.eid})
@@ -292,7 +293,7 @@
try:
with self.assertRaises(Exception) as cm:
run_transaction()
- self.assertEqual(str(cm.exception), 'try to access pool on a closed session')
+ self.assertEqual(str(cm.exception), 'try to access connections set on a closed session')
finally:
t.join()
@@ -382,7 +383,7 @@
def test_internal_api(self):
repo = self.repo
cnxid = repo.connect(self.admlogin, password=self.admpassword)
- session = repo._get_session(cnxid, setpool=True)
+ session = repo._get_session(cnxid, setcnxset=True)
self.assertEqual(repo.type_and_source_from_eid(2, session),
('CWGroup', 'system', None))
self.assertEqual(repo.type_from_eid(2, session), 'CWGroup')
@@ -519,31 +520,31 @@
class DataHelpersTC(CubicWebTC):
def test_create_eid(self):
- self.session.set_pool()
+ self.session.set_cnxset()
self.assert_(self.repo.system_source.create_eid(self.session))
def test_source_from_eid(self):
- self.session.set_pool()
+ self.session.set_cnxset()
self.assertEqual(self.repo.source_from_eid(1, self.session),
self.repo.sources_by_uri['system'])
def test_source_from_eid_raise(self):
- self.session.set_pool()
+ self.session.set_cnxset()
self.assertRaises(UnknownEid, self.repo.source_from_eid, -2, self.session)
def test_type_from_eid(self):
- self.session.set_pool()
+ self.session.set_cnxset()
self.assertEqual(self.repo.type_from_eid(2, self.session), 'CWGroup')
def test_type_from_eid_raise(self):
- self.session.set_pool()
+ self.session.set_cnxset()
self.assertRaises(UnknownEid, self.repo.type_from_eid, -2, self.session)
def test_add_delete_info(self):
entity = self.repo.vreg['etypes'].etype_class('Personne')(self.session)
entity.eid = -1
entity.complete = lambda x: None
- self.session.set_pool()
+ self.session.set_cnxset()
self.repo.add_info(self.session, entity, self.repo.system_source)
cu = self.session.system_sql('SELECT * FROM entities WHERE eid = -1')
data = cu.fetchall()
@@ -566,7 +567,7 @@
self.commit()
ts = datetime.now()
self.assertEqual(len(self.execute('Personne X WHERE X has_text "tutu"')), 1)
- self.session.set_pool()
+ self.session.set_cnxset()
cu = self.session.system_sql('SELECT mtime, eid FROM entities WHERE eid = %s' % eidp)
omtime = cu.fetchone()[0]
# our sqlite datetime adapter ignores the seconds fraction, so we have to
@@ -575,7 +576,7 @@
self.execute('SET X nom "tata" WHERE X eid %(x)s', {'x': eidp})
self.commit()
self.assertEqual(len(self.execute('Personne X WHERE X has_text "tutu"')), 1)
- self.session.set_pool()
+ self.session.set_cnxset()
cu = self.session.system_sql('SELECT mtime FROM entities WHERE eid = %s' % eidp)
mtime = cu.fetchone()[0]
self.failUnless(omtime < mtime)
@@ -646,7 +647,7 @@
CubicWebTC.setUp(self)
CALLED[:] = ()
- def _after_relation_hook(self, pool, fromeid, rtype, toeid):
+ def _after_relation_hook(self, cnxset, fromeid, rtype, toeid):
self.called.append((fromeid, rtype, toeid))
def test_inline_relation(self):
@@ -704,13 +705,18 @@
class PerformanceTest(CubicWebTC):
- def setup_database(self):
- import logging
+ def setUp(self):
+ super(PerformanceTest, self).setUp()
logger = logging.getLogger('cubicweb.session')
#logger.handlers = [logging.StreamHandler(sys.stdout)]
logger.setLevel(logging.INFO)
self.info = logger.info
+ def tearDown(self):
+ super(PerformanceTest, self).tearDown()
+ logger = logging.getLogger('cubicweb.session')
+ logger.setLevel(logging.CRITICAL)
+
def test_composite_deletion(self):
req = self.request()
personnes = []
--- a/server/test/unittest_rql2sql.py Wed Jun 08 15:11:45 2011 +0200
+++ b/server/test/unittest_rql2sql.py Wed Jun 08 17:08:00 2011 +0200
@@ -1348,6 +1348,18 @@
'''SELECT SUBSTR(_P.cw_nom, 1, 1)
FROM cw_Personne AS _P''')
+ def test_cast(self):
+ self._check("Any CAST(String, P) WHERE P is Personne",
+ '''SELECT CAST(_P.cw_eid AS text)
+FROM cw_Personne AS _P''')
+
+ def test_regexp(self):
+ self._check("Any X WHERE X login REGEXP '[0-9].*'",
+ '''SELECT _X.cw_eid
+FROM cw_CWUser AS _X
+WHERE _X.cw_login ~ [0-9].*
+''')
+
def test_parser_parse(self):
for t in self._parse(PARSER):
yield t
@@ -1653,6 +1665,9 @@
for t in self._parse(HAS_TEXT_LG_INDEXER):
yield t
+ def test_regexp(self):
+ self.skipTest('regexp-based pattern matching not implemented in sqlserver')
+
def test_or_having_fake_terms(self):
self._check('Any X WHERE X is CWUser, X creation_date D HAVING YEAR(D) = "2010" OR D = NULL',
'''SELECT _X.cw_eid
@@ -1749,6 +1764,10 @@
for t in self._parse(WITH_LIMIT):# + ADVANCED_WITH_LIMIT_OR_ORDERBY):
yield t
+ def test_cast(self):
+ self._check("Any CAST(String, P) WHERE P is Personne",
+ '''SELECT CAST(_P.cw_eid AS nvarchar(max))
+FROM cw_Personne AS _P''')
class SqliteSQLGeneratorTC(PostgresSQLGeneratorTC):
@@ -1762,6 +1781,14 @@
'''SELECT MONTH(_P.cw_creation_date)
FROM cw_Personne AS _P''')
+ def test_regexp(self):
+ self._check("Any X WHERE X login REGEXP '[0-9].*'",
+ '''SELECT _X.cw_eid
+FROM cw_CWUser AS _X
+WHERE _X.cw_login REGEXP [0-9].*
+''')
+
+
def test_union(self):
for t in self._parse((
('(Any N ORDERBY 1 WHERE X name N, X is State)'
@@ -1902,6 +1929,18 @@
'''SELECT EXTRACT(MONTH from _P.cw_creation_date)
FROM cw_Personne AS _P''')
+ def test_cast(self):
+ self._check("Any CAST(String, P) WHERE P is Personne",
+ '''SELECT CAST(_P.cw_eid AS mediumtext)
+FROM cw_Personne AS _P''')
+
+ def test_regexp(self):
+ self._check("Any X WHERE X login REGEXP '[0-9].*'",
+ '''SELECT _X.cw_eid
+FROM cw_CWUser AS _X
+WHERE _X.cw_login REGEXP [0-9].*
+''')
+
def test_from_clause_needed(self):
queries = [("Any 1 WHERE EXISTS(T is CWGroup, T name 'managers')",
'''SELECT 1
--- a/server/test/unittest_security.py Wed Jun 08 15:11:45 2011 +0200
+++ b/server/test/unittest_security.py Wed Jun 08 17:08:00 2011 +0200
@@ -221,7 +221,7 @@
rset = cu.execute('Personne P')
self.assertEqual(len(rset), 1)
ent = rset.get_entity(0, 0)
- session.set_pool() # necessary
+ session.set_cnxset() # necessary
self.assertRaises(Unauthorized, ent.cw_check_perm, 'update')
self.assertRaises(Unauthorized,
cu.execute, "SET P travaille S WHERE P is Personne, S is Societe")
@@ -579,7 +579,7 @@
cnx = self.login('iaminusersgrouponly')
session = self.session
# needed to avoid check_perm error
- session.set_pool()
+ session.set_cnxset()
# needed to remove rql expr granting update perm to the user
affaire_perms = self.schema['Affaire'].permissions.copy()
self.schema['Affaire'].set_action_permissions('update', self.schema['Affaire'].get_groups('update'))
--- a/server/test/unittest_session.py Wed Jun 08 15:11:45 2011 +0200
+++ b/server/test/unittest_session.py Wed Jun 08 17:08:00 2011 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -15,13 +15,12 @@
#
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
-"""
+from __future__ import with_statement
-"""
from logilab.common.testlib import TestCase, unittest_main, mock_object
from cubicweb.devtools.testlib import CubicWebTC
-from cubicweb.server.session import _make_description
+from cubicweb.server.session import _make_description, hooks_control
class Variable:
def __init__(self, name):
@@ -46,11 +45,38 @@
self.assertEqual(_make_description((Function('max', 'A'), Variable('B')), {}, solution),
['Int','CWUser'])
+
class InternalSessionTC(CubicWebTC):
def test_dbapi_query(self):
session = self.repo.internal_session()
self.assertFalse(session.running_dbapi_query)
session.close()
+
+class SessionTC(CubicWebTC):
+
+ def test_hooks_control(self):
+ session = self.session
+ self.assertEqual(session.hooks_mode, session.HOOKS_ALLOW_ALL)
+ self.assertEqual(session.disabled_hook_categories, set())
+ self.assertEqual(session.enabled_hook_categories, set())
+ self.assertEqual(len(session._tx_data), 1)
+ with hooks_control(session, session.HOOKS_DENY_ALL, 'metadata'):
+ self.assertEqual(session.hooks_mode, session.HOOKS_DENY_ALL)
+ self.assertEqual(session.disabled_hook_categories, set())
+ self.assertEqual(session.enabled_hook_categories, set(('metadata',)))
+ session.commit()
+ self.assertEqual(session.hooks_mode, session.HOOKS_DENY_ALL)
+ self.assertEqual(session.disabled_hook_categories, set())
+ self.assertEqual(session.enabled_hook_categories, set(('metadata',)))
+ session.rollback()
+ self.assertEqual(session.hooks_mode, session.HOOKS_DENY_ALL)
+ self.assertEqual(session.disabled_hook_categories, set())
+ self.assertEqual(session.enabled_hook_categories, set(('metadata',)))
+ # leaving context manager with no transaction running should reset the
+ # transaction local storage (and associated cnxset)
+ self.assertEqual(session._tx_data, {})
+ self.assertEqual(session.cnxset, None)
+
if __name__ == '__main__':
unittest_main()
--- a/server/test/unittest_undo.py Wed Jun 08 15:11:45 2011 +0200
+++ b/server/test/unittest_undo.py Wed Jun 08 17:08:00 2011 +0200
@@ -150,8 +150,8 @@
txuuid = self.commit()
actions = self.cnx.transaction_info(txuuid).actions_list()
self.assertEqual(len(actions), 1)
- toto.clear_all_caches()
- e.clear_all_caches()
+ toto.cw_clear_all_caches()
+ e.cw_clear_all_caches()
errors = self.cnx.undo_transaction(txuuid)
undotxuuid = self.commit()
self.assertEqual(undotxuuid, None) # undo not undoable
@@ -192,7 +192,7 @@
self.commit()
errors = self.cnx.undo_transaction(txuuid)
self.commit()
- p.clear_all_caches()
+ p.cw_clear_all_caches()
self.assertEqual(p.fiche[0].eid, c2.eid)
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0],
@@ -232,7 +232,7 @@
self.failIf(self.execute('Any X WHERE X eid %(x)s', {'x': c.eid}))
self.failIf(self.execute('Any X WHERE X eid %(x)s', {'x': p.eid}))
self.failIf(self.execute('Any X,Y WHERE X fiche Y'))
- self.session.set_pool()
+ self.session.set_cnxset()
for eid in (p.eid, c.eid):
self.failIf(session.system_sql(
'SELECT * FROM entities WHERE eid=%s' % eid).fetchall())
--- a/sobjects/parsers.py Wed Jun 08 15:11:45 2011 +0200
+++ b/sobjects/parsers.py Wed Jun 08 17:08:00 2011 +0200
@@ -31,14 +31,9 @@
"""
-import urllib2
-import StringIO
import os.path as osp
-from cookielib import CookieJar
from datetime import datetime, timedelta
-from lxml import etree
-
from logilab.common.date import todate, totime
from logilab.common.textutils import splitstrip, text_to_dict
@@ -48,10 +43,6 @@
from cubicweb import ValidationError, typed_eid
from cubicweb.server.sources import datafeed
-def ensure_str_keys(dic):
- for key in dic:
- dic[str(key)] = dic.pop(key)
-
# XXX see cubicweb.cwvreg.YAMS_TO_PY
# XXX see cubicweb.web.views.xmlrss.SERIALIZERS
DEFAULT_CONVERTERS = BASE_CONVERTERS.copy()
@@ -72,15 +63,6 @@
return time(seconds=int(ustr))
DEFAULT_CONVERTERS['Interval'] = convert_interval
-# use a cookie enabled opener to use session cookie if any
-_OPENER = urllib2.build_opener()
-try:
- from logilab.common import urllib2ext
- _OPENER.add_handler(urllib2ext.HTTPGssapiAuthHandler())
-except ImportError: # python-kerberos not available
- pass
-_OPENER.add_handler(urllib2.HTTPCookieProcessor(CookieJar()))
-
def extract_typed_attrs(eschema, stringdict, converters=DEFAULT_CONVERTERS):
typeddict = {}
for rschema in eschema.subject_relations():
@@ -116,10 +98,6 @@
item[child.tag] = unicode(child.text)
yield item, rels
-def build_search_rql(etype, attrs):
- restrictions = ['X %(attr)s %%(%(attr)s)s'%{'attr': attr} for attr in attrs]
- return 'Any X WHERE X is %s, %s' % (etype, ', '.join(restrictions))
-
def rtype_role_rql(rtype, role):
if role == 'object':
return 'Y %s X WHERE X eid %%(x)s' % rtype
@@ -138,7 +116,7 @@
raise ValidationError(eid, {rn('options', 'subject'): msg})
-class CWEntityXMLParser(datafeed.DataFeedParser):
+class CWEntityXMLParser(datafeed.DataFeedXMLParser):
"""datafeed parser for the 'xml' entity view"""
__regid__ = 'cw.entityxml'
@@ -147,6 +125,8 @@
'link-or-create': _check_linkattr_option,
'link': _check_linkattr_option,
}
+ parse_etree = staticmethod(_parse_entity_etree)
+
def __init__(self, *args, **kwargs):
super(CWEntityXMLParser, self).__init__(*args, **kwargs)
@@ -208,46 +188,14 @@
# import handling ##########################################################
- def process(self, url, partialcommit=True):
- """IDataFeedParser main entry point"""
- # XXX suppression support according to source configuration. If set, get
- # all cwuri of entities from this source, and compare with newly
- # imported ones
- error = False
- for item, rels in self.parse(url):
- cwuri = item['cwuri']
- try:
- self.process_item(item, rels)
- if partialcommit:
- # commit+set_pool instead of commit(reset_pool=False) to let
- # other a chance to get our pool
- self._cw.commit()
- self._cw.set_pool()
- except ValidationError, exc:
- if partialcommit:
- self.source.error('Skipping %s because of validation error %s' % (cwuri, exc))
- self._cw.rollback()
- self._cw.set_pool()
- error = True
- else:
- raise
- return error
-
- def parse(self, url):
- if not url.startswith('http'):
- stream = StringIO.StringIO(url)
- else:
- for mappedurl in HOST_MAPPING:
- if url.startswith(mappedurl):
- url = url.replace(mappedurl, HOST_MAPPING[mappedurl], 1)
- break
- self.source.info('GET %s', url)
- stream = _OPENER.open(url)
- return _parse_entity_etree(etree.parse(stream).getroot())
+ # XXX suppression support according to source configuration. If set, get all
+ # cwuri of entities from this source, and compare with newly imported ones
def process_item(self, item, rels):
entity = self.extid2entity(str(item.pop('cwuri')), item.pop('cwtype'),
item=item)
+ if entity is None:
+ return None
if not (self.created_during_pull(entity) or self.updated_during_pull(entity)):
self.notify_updated(entity)
item.pop('eid')
@@ -279,17 +227,18 @@
Takes no option.
"""
assert not any(x[1] for x in rules), "'copy' action takes no option"
- ttypes = set([x[0] for x in rules])
- others = [item for item in others if item['cwtype'] in ttypes]
+ ttypes = frozenset([x[0] for x in rules])
eids = [] # local eids
- if not others:
+ for item in others:
+ if item['cwtype'] in ttypes:
+ item, _rels = self._complete_item(item)
+ other_entity = self.process_item(item, [])
+ if other_entity is not None:
+ eids.append(other_entity.eid)
+ if eids:
+ self._set_relation(entity, rtype, role, eids)
+ else:
self._clear_relation(entity, rtype, role, ttypes)
- return
- for item in others:
- item, _rels = self._complete_item(item)
- other_entity = self.process_item(item, [])
- eids.append(other_entity.eid)
- self._set_relation(entity, rtype, role, eids)
def related_link(self, entity, rtype, role, others, rules):
"""implementation of 'link' action
@@ -329,24 +278,22 @@
self.source.error('missing attribute, got %s expected keys %s'
% (item, searchattrs))
continue
- kwargs = dict((attr, item[attr]) for attr in searchattrs)
- rql = build_search_rql(item['cwtype'], kwargs)
- rset = self._cw.execute(rql, kwargs)
- if len(rset) > 1:
+ kwargs = dict((str(attr), item[attr]) for attr in searchattrs) # XXX str() needed with python < 2.6
+ targets = tuple(self._cw.find_entities(item['cwtype'], **kwargs))
+ if len(targets) > 1:
self.source.error('ambiguous link: found %s entities of type %s with attributes %s',
- len(rset), item['cwtype'], kwargs)
- elif len(rset) == 1:
- eids.append(rset[0][0])
+ len(targets), item['cwtype'], kwargs)
+ elif len(targets) == 1:
+ eids.append(targets[0].eid)
elif create_when_not_found:
- ensure_str_keys(kwargs) # XXX necessary with python < 2.6
eids.append(self._cw.create_entity(item['cwtype'], **kwargs).eid)
else:
self.source.error('can not find %s entity with attributes %s',
item['cwtype'], kwargs)
- if not eids:
+ if eids:
+ self._set_relation(entity, rtype, role, eids)
+ else:
self._clear_relation(entity, rtype, role, (ttype,))
- else:
- self._set_relation(entity, rtype, role, eids)
def _complete_item(self, item, add_relations=True):
itemurl = item['cwuri'] + '?vid=xml'
@@ -367,18 +314,16 @@
{'x': entity.eid})
def _set_relation(self, entity, rtype, role, eids):
+ assert eids
rqlbase = rtype_role_rql(rtype, role)
- rql = 'DELETE %s' % rqlbase
- if eids:
- eidstr = ','.join(str(eid) for eid in eids)
- rql += ', NOT Y eid IN (%s)' % eidstr
+ eidstr = ','.join(str(eid) for eid in eids)
+ self._cw.execute('DELETE %s, NOT Y eid IN (%s)' % (rqlbase, eidstr),
+ {'x': entity.eid})
+ if role == 'object':
+ rql = 'SET %s, Y eid IN (%s), NOT Y %s X' % (rqlbase, eidstr, rtype)
+ else:
+ rql = 'SET %s, Y eid IN (%s), NOT X %s Y' % (rqlbase, eidstr, rtype)
self._cw.execute(rql, {'x': entity.eid})
- if eids:
- if role == 'object':
- rql = 'SET %s, Y eid IN (%s), NOT Y %s X' % (rqlbase, eidstr, rtype)
- else:
- rql = 'SET %s, Y eid IN (%s), NOT X %s Y' % (rqlbase, eidstr, rtype)
- self._cw.execute(rql, {'x': entity.eid})
def registration_callback(vreg):
vreg.register_all(globals().values(), __name__)
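To see what the reworked `_set_relation` sends, a hedged trace for a subject relation `tags` with `eids = [3, 4]` (hypothetical values): the DELETE prunes links outside the new set, the SET adds the missing ones.

    eids = [3, 4]
    eidstr = ','.join(str(eid) for eid in eids)
    delete_rql = 'DELETE X tags Y WHERE X eid %%(x)s, NOT Y eid IN (%s)' % eidstr
    set_rql = 'SET X tags Y WHERE X eid %%(x)s, Y eid IN (%s), NOT X tags Y' % eidstr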
--- a/sobjects/test/unittest_parsers.py Wed Jun 08 15:11:45 2011 +0200
+++ b/sobjects/test/unittest_parsers.py Wed Jun 08 17:08:00 2011 +0200
@@ -156,14 +156,55 @@
self.assertEqual(tag.cwuri, 'http://testing.fr/cubicweb/%s' % tag.eid)
self.assertEqual(tag.cw_source[0].name, 'system')
+ session.set_cnxset()
stats = dfsource.pull_data(session, force=True, raise_on_error=True)
self.assertEqual(stats['created'], set())
self.assertEqual(len(stats['updated']), 2)
self.repo._type_source_cache.clear()
self.repo._extid_cache.clear()
+ session.set_cnxset()
stats = dfsource.pull_data(session, force=True, raise_on_error=True)
self.assertEqual(stats['created'], set())
self.assertEqual(len(stats['updated']), 2)
+ session.commit()
+
+ # test move to system source
+ self.sexecute('SET X cw_source S WHERE X eid %(x)s, S name "system"', {'x': email.eid})
+ self.commit()
+ rset = self.sexecute('EmailAddress X WHERE X address "syt@logilab.fr"')
+ self.assertEqual(len(rset), 1)
+ e = rset.get_entity(0, 0)
+ self.assertEqual(e.eid, email.eid)
+ self.assertEqual(e.cw_metainformation(), {'source': {'type': u'native', 'uri': u'system'},
+ 'type': 'EmailAddress',
+ 'extid': None})
+ self.assertEqual(e.cw_source[0].name, 'system')
+ self.assertEqual(e.reverse_use_email[0].login, 'sthenault')
+ self.commit()
+ # test everything is still fine after source synchronization
+ session.set_cnxset()
+ stats = dfsource.pull_data(session, force=True, raise_on_error=True)
+ rset = self.sexecute('EmailAddress X WHERE X address "syt@logilab.fr"')
+ self.assertEqual(len(rset), 1)
+ e = rset.get_entity(0, 0)
+ self.assertEqual(e.eid, email.eid)
+ self.assertEqual(e.cw_metainformation(), {'source': {'type': u'native', 'uri': u'system'},
+ 'type': 'EmailAddress',
+ 'extid': None})
+ self.assertEqual(e.cw_source[0].name, 'system')
+ self.assertEqual(e.reverse_use_email[0].login, 'sthenault')
+ session.commit()
+
+ # test delete entity
+ e.cw_delete()
+ self.commit()
+ # test everything is still fine after source synchronization
+ session.set_cnxset()
+ stats = dfsource.pull_data(session, force=True, raise_on_error=True)
+ rset = self.sexecute('EmailAddress X WHERE X address "syt@logilab.fr"')
+ self.assertEqual(len(rset), 0)
+ rset = self.sexecute('Any X WHERE X use_email E, X login "sthenault"')
+ self.assertEqual(len(rset), 0)
if __name__ == '__main__':
from logilab.common.testlib import unittest_main
--- a/test/unittest_entity.py Wed Jun 08 15:11:45 2011 +0200
+++ b/test/unittest_entity.py Wed Jun 08 17:08:00 2011 +0200
@@ -572,7 +572,7 @@
self.assertEqual(person.rest_path(), 'personne/doe')
# ambiguity test
person2 = req.create_entity('Personne', prenom=u'remi', nom=u'doe')
- person.clear_all_caches()
+ person.cw_clear_all_caches()
self.assertEqual(person.rest_path(), 'personne/eid/%s' % person.eid)
self.assertEqual(person2.rest_path(), 'personne/eid/%s' % person2.eid)
# unique attr with None value (wikiid in this case)
--- a/test/unittest_schema.py Wed Jun 08 15:11:45 2011 +0200
+++ b/test/unittest_schema.py Wed Jun 08 17:08:00 2011 +0200
@@ -29,7 +29,7 @@
from yams import BadSchemaDefinition
from yams.constraints import SizeConstraint, StaticVocabularyConstraint
from yams.buildobjs import RelationDefinition, EntityType, RelationType
-from yams.reader import PyFileReader
+from yams.reader import fill_schema
from cubicweb.schema import (
CubicWebSchema, CubicWebEntitySchema, CubicWebSchemaLoader,
@@ -158,7 +158,7 @@
self.assert_(isinstance(schema, CubicWebSchema))
self.assertEqual(schema.name, 'data')
entities = sorted([str(e) for e in schema.entities()])
- expected_entities = ['BaseTransition', 'Bookmark', 'Boolean', 'Bytes', 'Card',
+ expected_entities = ['BaseTransition', 'BigInt', 'Bookmark', 'Boolean', 'Bytes', 'Card',
'Date', 'Datetime', 'Decimal',
'CWCache', 'CWConstraint', 'CWConstraintType', 'CWEType',
'CWAttribute', 'CWGroup', 'EmailAddress', 'CWRelation',
@@ -208,7 +208,7 @@
'read_permission', 'relation_type', 'relations', 'require_group',
- 'specializes', 'state_of', 'subworkflow', 'subworkflow_exit', 'subworkflow_state', 'surname', 'symmetric', 'synopsis',
+ 'specializes', 'state_of', 'subworkflow', 'subworkflow_exit', 'subworkflow_state', 'surname', 'symmetric', 'synchronizing', 'synopsis',
'tags', 'timestamp', 'title', 'to_entity', 'to_state', 'transition_of', 'travaille', 'type',
@@ -259,18 +259,23 @@
self.assertEqual([x.expression for x in aschema.get_rqlexprs('update')],
['U has_update_permission X'])
+ def test_nonregr_allowed_type_names(self):
+ schema = CubicWebSchema('Test Schema')
+ schema.add_entity_type(EntityType('NaN'))
+
+
class BadSchemaTC(TestCase):
def setUp(self):
self.loader = CubicWebSchemaLoader()
self.loader.defined = {}
self.loader.loaded_files = []
self.loader.post_build_callbacks = []
- self.loader._pyreader = PyFileReader(self.loader)
def _test(self, schemafile, msg):
self.loader.handle_file(join(DATADIR, schemafile))
+ sch = self.loader.schemacls('toto')
with self.assertRaises(BadSchemaDefinition) as cm:
- self.loader._build_schema('toto', False)
+ fill_schema(sch, self.loader.defined, False)
self.assertEqual(str(cm.exception), msg)
def test_lowered_etype(self):
--- a/test/unittest_selectors.py Wed Jun 08 15:11:45 2011 +0200
+++ b/test/unittest_selectors.py Wed Jun 08 17:08:00 2011 +0200
@@ -102,6 +102,10 @@
self.assertIs(csel.search_selector(is_instance), sel)
csel = AndSelector(Selector(), sel)
self.assertIs(csel.search_selector(is_instance), sel)
+ self.assertIs(csel.search_selector((AndSelector, OrSelector)), csel)
+ self.assertIs(csel.search_selector((OrSelector, AndSelector)), csel)
+ self.assertIs(csel.search_selector((is_instance, score_entity)), sel)
+ self.assertIs(csel.search_selector((score_entity, is_instance)), sel)
def test_inplace_and(self):
selector = _1_()
@@ -193,7 +197,7 @@
class WorkflowSelectorTC(CubicWebTC):
def _commit(self):
self.commit()
- self.wf_entity.clear_all_caches()
+ self.wf_entity.cw_clear_all_caches()
def setup_database(self):
wf = self.shell().add_workflow("wf_test", 'StateFull', default=True)
--- a/test/unittest_utils.py Wed Jun 08 15:11:45 2011 +0200
+++ b/test/unittest_utils.py Wed Jun 08 17:08:00 2011 +0200
@@ -22,8 +22,8 @@
import datetime
from logilab.common.testlib import TestCase, unittest_main
-
-from cubicweb.utils import make_uid, UStringIO, SizeConstrainedList, RepeatList
+from cubicweb.devtools.testlib import CubicWebTC
+from cubicweb.utils import make_uid, UStringIO, SizeConstrainedList, RepeatList, HTMLHead
from cubicweb.entity import Entity
try:
@@ -155,6 +155,102 @@
def test_encoding_unknown_stuff(self):
self.assertEqual(self.encode(TestCase), 'null')
+class HTMLHeadTC(CubicWebTC):
+ def test_concat_urls(self):
+ base_url = u'http://test.fr/data/'
+ head = HTMLHead(base_url)
+ urls = [base_url + u'bob1.js',
+ base_url + u'bob2.js',
+ base_url + u'bob3.js']
+ result = head.concat_urls(urls)
+ expected = u'http://test.fr/data/??bob1.js,bob2.js,bob3.js'
+ self.assertEqual(result, expected)
+
+ def test_group_urls(self):
+ base_url = u'http://test.fr/data/'
+ head = HTMLHead(base_url)
+ urls_spec = [(base_url + u'bob0.js', None),
+ (base_url + u'bob1.js', None),
+ (u'http://ext.com/bob2.js', None),
+ (u'http://ext.com/bob3.js', None),
+ (base_url + u'bob4.css', 'all'),
+ (base_url + u'bob5.css', 'all'),
+ (base_url + u'bob6.css', 'print'),
+ (base_url + u'bob7.css', 'print'),
+ (base_url + u'bob8.css', ('all', u'[if IE 8]')),
+ (base_url + u'bob9.css', ('print', u'[if IE 8]'))
+ ]
+ result = head.group_urls(urls_spec)
+ expected = [(base_url + u'??bob0.js,bob1.js', None),
+ (u'http://ext.com/bob2.js', None),
+ (u'http://ext.com/bob3.js', None),
+ (base_url + u'??bob4.css,bob5.css', 'all'),
+ (base_url + u'??bob6.css,bob7.css', 'print'),
+ (base_url + u'bob8.css', ('all', u'[if IE 8]')),
+ (base_url + u'bob9.css', ('print', u'[if IE 8]'))
+ ]
+ self.assertEqual(list(result), expected)
+
+ def test_getvalue_with_concat(self):
+ base_url = u'http://test.fr/data/'
+ head = HTMLHead(base_url)
+ head.add_js(base_url + u'bob0.js')
+ head.add_js(base_url + u'bob1.js')
+ head.add_js(u'http://ext.com/bob2.js')
+ head.add_js(u'http://ext.com/bob3.js')
+ head.add_css(base_url + u'bob4.css')
+ head.add_css(base_url + u'bob5.css')
+ head.add_css(base_url + u'bob6.css', 'print')
+ head.add_css(base_url + u'bob7.css', 'print')
+ head.add_ie_css(base_url + u'bob8.css')
+ head.add_ie_css(base_url + u'bob9.css', 'print', u'[if lt IE 7]')
+ result = head.getvalue()
+ expected = u"""<head>
+<link rel="stylesheet" type="text/css" media="all" href="http://test.fr/data/??bob4.css,bob5.css"/>
+<link rel="stylesheet" type="text/css" media="print" href="http://test.fr/data/??bob6.css,bob7.css"/>
+<!--[if lt IE 8]>
+<link rel="stylesheet" type="text/css" media="all" href="http://test.fr/data/bob8.css"/>
+<!--[if lt IE 7]>
+<link rel="stylesheet" type="text/css" media="print" href="http://test.fr/data/bob9.css"/>
+<![endif]-->
+<script type="text/javascript" src="http://test.fr/data/??bob0.js,bob1.js"></script>
+<script type="text/javascript" src="http://ext.com/bob2.js"></script>
+<script type="text/javascript" src="http://ext.com/bob3.js"></script>
+</head>
+"""
+ self.assertEqual(result, expected)
+
+ def test_getvalue_without_concat(self):
+ base_url = u'http://test.fr/data/'
+ head = HTMLHead()
+ head.add_js(base_url + u'bob0.js')
+ head.add_js(base_url + u'bob1.js')
+ head.add_js(u'http://ext.com/bob2.js')
+ head.add_js(u'http://ext.com/bob3.js')
+ head.add_css(base_url + u'bob4.css')
+ head.add_css(base_url + u'bob5.css')
+ head.add_css(base_url + u'bob6.css', 'print')
+ head.add_css(base_url + u'bob7.css', 'print')
+ head.add_ie_css(base_url + u'bob8.css')
+ head.add_ie_css(base_url + u'bob9.css', 'print', u'[if lt IE 7]')
+ result = head.getvalue()
+ expected = u"""<head>
+<link rel="stylesheet" type="text/css" media="all" href="http://test.fr/data/bob4.css"/>
+<link rel="stylesheet" type="text/css" media="all" href="http://test.fr/data/bob5.css"/>
+<link rel="stylesheet" type="text/css" media="print" href="http://test.fr/data/bob6.css"/>
+<link rel="stylesheet" type="text/css" media="print" href="http://test.fr/data/bob7.css"/>
+<!--[if lt IE 8]>
+<link rel="stylesheet" type="text/css" media="all" href="http://test.fr/data/bob8.css"/>
+<!--[if lt IE 7]>
+<link rel="stylesheet" type="text/css" media="print" href="http://test.fr/data/bob9.css"/>
+<![endif]-->
+<script type="text/javascript" src="http://test.fr/data/bob0.js"></script>
+<script type="text/javascript" src="http://test.fr/data/bob1.js"></script>
+<script type="text/javascript" src="http://ext.com/bob2.js"></script>
+<script type="text/javascript" src="http://ext.com/bob3.js"></script>
+</head>
+"""
+ self.assertEqual(result, expected)
if __name__ == '__main__':
unittest_main()
--- a/toolsutils.py Wed Jun 08 15:11:45 2011 +0200
+++ b/toolsutils.py Wed Jun 08 17:08:00 2011 +0200
@@ -159,15 +159,11 @@
print '-> set permissions to 0600 for %s' % filepath
chmod(filepath, 0600)
-def read_config(config_file):
- """read the instance configuration from a file and return it as a
- dictionnary
-
- :type config_file: str
- :param config_file: path to the configuration file
-
- :rtype: dict
- :return: a dictionary with specified values associated to option names
+def read_config(config_file, raise_if_unreadable=False):
+ """read some simple configuration from `config_file` and return it as a
+ dictionary. If `raise_if_unreadable` is false (the default), an empty
+ dictionary will be returned if the file is nonexistent or unreadable, else
+ :exc:`ExecutionError` will be raised.
"""
from logilab.common.fileutils import lines
config = current = {}
@@ -190,8 +186,12 @@
value = value.strip()
current[option] = value or None
except IOError, ex:
- warning('missing or non readable configuration file %s (%s)',
- config_file, ex)
+ if raise_if_unreadable:
+ raise ExecutionError('%s. Are you logged in as the correct user '
+ 'to use this instance?' % ex)
+ else:
+ warning('missing or non readable configuration file %s (%s)',
+ config_file, ex)
return config
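A minimal usage sketch of the new flag (the configuration path is hypothetical):

    from cubicweb.toolsutils import read_config

    # with raise_if_unreadable=True, a missing or unreadable file raises
    # ExecutionError instead of silently yielding an empty dictionary
    config = read_config('/etc/cubicweb.d/myinstance/sources',
                         raise_if_unreadable=True)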
--- a/uilib.py Wed Jun 08 15:11:45 2011 +0200
+++ b/uilib.py Wed Jun 08 17:08:00 2011 +0200
@@ -62,12 +62,18 @@
return value
if attrtype == 'Date':
return ustrftime(value, req.property_value('ui.date-format'))
- if attrtype in ('Time', 'TZTime'):
+ if attrtype == 'Time':
return ustrftime(value, req.property_value('ui.time-format'))
- if attrtype in ('Datetime', 'TZDatetime'):
+ if attrtype == 'TZTime':
+ return ustrftime(value, req.property_value('ui.time-format')) + u' UTC'
+ if attrtype == 'Datetime':
if displaytime:
return ustrftime(value, req.property_value('ui.datetime-format'))
return ustrftime(value, req.property_value('ui.date-format'))
+ if attrtype == 'TZDatetime':
+ if displaytime:
+ return ustrftime(value, req.property_value('ui.datetime-format')) + u' UTC'
+ return ustrftime(value, req.property_value('ui.date-format'))
if attrtype == 'Boolean':
if value:
return req._('yes')
--- a/utils.py Wed Jun 08 15:11:45 2011 +0200
+++ b/utils.py Wed Jun 08 17:08:00 2011 +0200
@@ -51,20 +51,6 @@
return str(key) + uuid4().hex
-def dump_class(cls, clsname):
- """create copy of a class by creating an empty class inheriting
- from the given cls.
-
- Those class will be used as place holder for attribute and relation
- description
- """
- # type doesn't accept unicode name
- # return type.__new__(type, str(clsname), (cls,), {})
- # __autogenerated__ attribute is just a marker
- return type(str(clsname), (cls,), {'__autogenerated__': True,
- '__doc__': cls.__doc__,
- '__module__': cls.__module__})
-
def support_args(callable, *argnames):
"""return true if the callable support given argument names"""
if isinstance(callable, type):
@@ -240,7 +226,7 @@
xhtml_safe_script_opening = u'<script type="text/javascript"><!--//--><![CDATA[//><!--\n'
xhtml_safe_script_closing = u'\n//--><!]]></script>'
- def __init__(self):
+ def __init__(self, datadir_url=None):
super(HTMLHead, self).__init__()
self.jsvars = []
self.jsfiles = []
@@ -248,6 +234,7 @@
self.ie_cssfiles = []
self.post_inlined_scripts = []
self.pagedata_unload = False
+ self.datadir_url = datadir_url
def add_raw(self, rawheader):
@@ -284,7 +271,7 @@
if jsfile not in self.jsfiles:
self.jsfiles.append(jsfile)
- def add_css(self, cssfile, media):
+ def add_css(self, cssfile, media='all'):
"""adds `cssfile` to the list of javascripts used in the webpage
This function checks if the file has already been added
@@ -304,6 +291,45 @@
self.post_inlined_scripts.append(self.js_unload_code)
self.pagedata_unload = True
+ def concat_urls(self, urls):
+ """concatenates urls into one url usable by Apache mod_concat
+
+ This method returns the url unmodified if there is only one
+ element in the list.
+
+ :param urls: list of local urls/filenames to concatenate
+ """
+ if len(urls) == 1:
+ return urls[0]
+ len_prefix = len(self.datadir_url)
+ concated = u','.join(url[len_prefix:] for url in urls)
+ return (u'%s??%s' % (self.datadir_url, concated))
+
+ def group_urls(self, urls_spec):
+ """parses urls_spec in order to generate concatenated urls
+ for js and css includes
+
+ This method checks whether each file is local and whether it shares
+ options with its direct neighbors.
+
+ :param urls_spec: entire list of urls/filenames to inspect
+ """
+ concatable = []
+ prev_islocal = False
+ prev_key = None
+ for url, key in urls_spec:
+ islocal = url.startswith(self.datadir_url)
+ if concatable and (islocal != prev_islocal or key != prev_key):
+ yield (self.concat_urls(concatable), prev_key)
+ del concatable[:]
+ if not islocal:
+ yield (url, key)
+ else:
+ concatable.append(url)
+ prev_islocal = islocal
+ prev_key = key
+ if concatable:
+ yield (self.concat_urls(concatable), prev_key)
+
+
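A quick sketch of how these two helpers cooperate, mirroring the HTMLHeadTC tests above (urls hypothetical):

    from cubicweb.utils import HTMLHead

    head = HTMLHead(u'http://test.fr/data/')
    specs = [(u'http://test.fr/data/a.js', None),
             (u'http://test.fr/data/b.js', None),
             (u'http://ext.com/c.js', None)]
    # adjacent local urls sharing the same key collapse into a mod_concat url
    assert list(head.group_urls(specs)) == [
        (u'http://test.fr/data/??a.js,b.js', None),
        (u'http://ext.com/c.js', None)]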
def getvalue(self, skiphead=False):
"""reimplement getvalue to provide a consistent (and somewhat browser
optimized cf. http://stevesouders.com/cuzillion) order in external
@@ -321,18 +347,20 @@
w(vardecl + u'\n')
w(self.xhtml_safe_script_closing)
# 2/ css files
- for cssfile, media in self.cssfiles:
+ for cssfile, media in (self.group_urls(self.cssfiles) if self.datadir_url else self.cssfiles):
w(u'<link rel="stylesheet" type="text/css" media="%s" href="%s"/>\n' %
(media, xml_escape(cssfile)))
# 3/ ie css if necessary
if self.ie_cssfiles:
- for cssfile, media, iespec in self.ie_cssfiles:
+ ie_cssfiles = ((x, (y, z)) for x, y, z in self.ie_cssfiles)
+ for cssfile, (media, iespec) in (self.group_urls(ie_cssfiles) if self.datadir_url else ie_cssfiles):
w(u'<!--%s>\n' % iespec)
w(u'<link rel="stylesheet" type="text/css" media="%s" href="%s"/>\n' %
(media, xml_escape(cssfile)))
w(u'<![endif]--> \n')
# 4/ js files
- for jsfile in self.jsfiles:
+ jsfiles = ((x, None) for x in self.jsfiles)
+ for jsfile, media in self.group_urls(jsfiles) if self.datadir_url else jsfiles:
w(u'<script type="text/javascript" src="%s"></script>\n' %
xml_escape(jsfile))
# 5/ post inlined scripts (i.e. scripts depending on other JS files)
--- a/vregistry.py Wed Jun 08 15:11:45 2011 +0200
+++ b/vregistry.py Wed Jun 08 17:08:00 2011 +0200
@@ -184,7 +184,10 @@
raise :exc:`NoSelectableObject` if no object applies
"""
- return self._select_best(self[__oid], *args, **kwargs)
+ obj = self._select_best(self[__oid], *args, **kwargs)
+ if obj is None:
+ raise NoSelectableObject(args, kwargs, self[__oid])
+ return obj
def select_or_none(self, __oid, *args, **kwargs):
"""return the most specific object among those with the given oid
@@ -202,16 +205,18 @@
context
"""
for appobjects in self.itervalues():
- try:
- yield self._select_best(appobjects, *args, **kwargs)
- except NoSelectableObject:
+ obj = self._select_best(appobjects, *args, **kwargs)
+ if obj is None:
continue
+ yield obj
def _select_best(self, appobjects, *args, **kwargs):
"""return an instance of the most specific object according
to parameters
- raise `NoSelectableObject` if not object apply
+ return None if no object applies (don't raise `NoSelectableObject`
+ since it's costly when searching appobjects using `possible_objects`,
+ e.g. when searching for hooks).
"""
if len(args) > 1:
warn('[3.5] only the request param can not be named when calling select*',
@@ -224,7 +229,7 @@
elif appobjectscore > 0 and appobjectscore == score:
winners.append(appobject)
if winners is None:
- raise NoSelectableObject(args, kwargs, appobjects)
+ return None
if len(winners) > 1:
# log in production environment / test, error while debugging
msg = 'select ambiguity: %s\n(args: %s, kwargs: %s)'
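The rationale is measurable: raising and catching an exception per non-selectable object is far more expensive than testing for None in hot loops such as hook selection. A rough standalone illustration:

    import timeit

    # raising/catching is roughly an order of magnitude slower than a
    # plain None check (exact figures depend on the interpreter)
    raising = timeit.timeit(
        'try:\n    raise ValueError\nexcept ValueError:\n    pass',
        number=100000)
    checking = timeit.timeit('obj is None', setup='obj = None',
                             number=100000)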
--- a/web/component.py Wed Jun 08 15:11:45 2011 +0200
+++ b/web/component.py Wed Jun 08 17:08:00 2011 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -57,8 +57,6 @@
page_link_templ = u'<span class="slice"><a href="%s" title="%s">%s</a></span>'
selected_page_link_templ = u'<span class="selectedSlice"><a href="%s" title="%s">%s</a></span>'
previous_page_link_templ = next_page_link_templ = page_link_templ
- no_previous_page_link = u'<<'
- no_next_page_link = u'>>'
def __init__(self, req, rset, **kwargs):
super(NavigationComponent, self).__init__(req, rset=rset, **kwargs)
@@ -131,7 +129,37 @@
return self.selected_page_link_templ % (url, content, content)
return self.page_link_templ % (url, content, content)
- def previous_link(self, path, params, content='<<', title=_('previous_results')):
+ @property
+ def prev_icon_url(self):
+ return xml_escape(self._cw.data_url('go_prev.png'))
+
+ @property
+ def next_icon_url(self):
+ return xml_escape(self._cw.data_url('go_next.png'))
+
+ @property
+ def no_previous_page_link(self):
+ return (u'<img src="%s" alt="%s" class="prevnext_nogo"/>' %
+ (self.prev_icon_url, self._cw._('there is no previous page')))
+
+ @property
+ def no_next_page_link(self):
+ return (u'<img src="%s" alt="%s" class="prevnext_nogo"/>' %
+ (self.next_icon_url, self._cw._('there is no next page')))
+
+ @property
+ def no_content_prev_link(self):
+ return (u'<img src="%s" alt="%s" class="prevnext"/>' % (
+ (self.prev_icon_url, self._cw._('no content prev link'))))
+
+ @property
+ def no_content_next_link(self):
+ return (u'<img src="%s" alt="%s" class="prevnext"/>' %
+ (self.next_icon_url, self._cw._('no content next link')))
+
+ def previous_link(self, path, params, content=None, title=_('previous_results')):
+ if not content:
+ content = self.no_content_prev_link
start = self.starting_from
if not start :
return self.no_previous_page_link
@@ -140,7 +168,9 @@
url = xml_escape(self.page_url(path, params, start, stop))
return self.previous_page_link_templ % (url, title, content)
- def next_link(self, path, params, content='>>', title=_('next_results')):
+ def next_link(self, path, params, content=None, title=_('next_results')):
+ if not content:
+ content = self.no_content_next_link
start = self.starting_from + self.page_size
if start >= self.total:
return self.no_next_page_link
--- a/web/controller.py Wed Jun 08 15:11:45 2011 +0200
+++ b/web/controller.py Wed Jun 08 17:08:00 2011 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -114,7 +114,7 @@
[recipient], body, subject)
if not self._cw.vreg.config.sendmails([(msg, [recipient])]):
msg = self._cw._('could not connect to the SMTP server')
- url = self._cw.build_url(__message=msg)
+ url = self._cw.build_url(__message=msgid)
raise Redirect(url)
def reset(self):
@@ -123,8 +123,10 @@
"""
newparams = {}
# sets message if needed
- if self._cw.message:
- newparams['_cwmsgid'] = self._cw.set_redirect_message(self._cw.message)
+ # XXX - don't call .message twice since it pops the id
+ msg = self._cw.message
+ if msg:
+ newparams['_cwmsgid'] = self._cw.set_redirect_message(msg)
if self._cw.form.has_key('__action_apply'):
self._return_to_edition_view(newparams)
if self._cw.form.has_key('__action_cancel'):
@@ -165,7 +167,7 @@
elif self._edited_entity:
# clear caches in case some attribute participating to the rest path
# has been modified
- self._edited_entity.clear_all_caches()
+ self._edited_entity.cw_clear_all_caches()
path = self._edited_entity.rest_path()
else:
path = 'view'
--- a/web/data/cubicweb.ajax.js Wed Jun 08 15:11:45 2011 +0200
+++ b/web/data/cubicweb.ajax.js Wed Jun 08 17:08:00 2011 +0200
@@ -22,6 +22,9 @@
*
* dummy ultra minimalist implementation of deferred for jQuery
*/
+
+cw.ajax = new Namespace('cw.ajax');
+
function Deferred() {
this.__init__(this);
}
@@ -86,6 +89,65 @@
var JSON_BASE_URL = baseuri() + 'json?';
+
+jQuery.extend(cw.ajax, {
+ /* variant of jquery evalScript with cache: true in ajax call */
+ _evalscript: function ( i, elem ) {
+ if ( elem.src ) {
+ jQuery.ajax({
+ url: elem.src,
+ async: false,
+ cache: true,
+ dataType: "script"
+ });
+ } else {
+ jQuery.globalEval( elem.text || elem.textContent || elem.innerHTML || "" );
+ }
+ if ( elem.parentNode ) {
+ elem.parentNode.removeChild( elem );
+ }
+ },
+
+ evalscripts: function ( scripts ) {
+ if ( scripts.length ) {
+ jQuery.each(scripts, cw.ajax._evalscript);
+ }
+ },
+
+ /**
+ * returns a non-null match object if `url` is a mod_concat-like url
+ * (e.g. http://..../data??resource1.js,resource2.js), null otherwise
+ */
+ _modconcatLikeUrl: function(url) {
+ var base = baseuri();
+ if (!base.endswith('/')) { base += '/'; }
+ var modconcat_rgx = new RegExp('(' + base + 'data/([a-z0-9]+/)?)\\?\\?(.+)');
+ return modconcat_rgx.exec(url);
+ },
+
+ /**
+ * decomposes a mod_concat-like url into its corresponding list of
+ * resources' urls
+ * >>> _listResources('http://foo.com/data/??a.js,b.js,c.js')
+ * ['http://foo.com/data/a.js', 'http://foo.com/data/b.js', 'http://foo.com/data/c.js']
+ */
+ _listResources: function(src) {
+ var resources = [];
+ var groups = cw.ajax._modconcatLikeUrl(src);
+ if (groups == null) {
+ resources.push(src);
+ } else {
+ var dataurl = groups[1];
+ $.each(cw.utils.lastOf(groups).split(','),
+ function() {
+ resources.push(dataurl + this);
+ }
+ );
+ }
+ return resources;
+ }
+});
+
//============= utility function handling remote calls responses. ==============//
function _loadAjaxHtmlHead($node, $head, tag, srcattr) {
var jqtagfilter = tag + '[' + srcattr + ']';
@@ -93,28 +155,47 @@
cw['loaded_'+srcattr] = [];
var loaded = cw['loaded_'+srcattr];
jQuery('head ' + jqtagfilter).each(function(i) {
- loaded.push(this.getAttribute(srcattr));
- });
+ // tab1.push.apply(tab1, tab2) <=> tab1 += tab2 (python-wise)
+ loaded.push.apply(loaded, cw.ajax._listResources(this.getAttribute(srcattr)));
+ });
} else {
var loaded = cw['loaded_'+srcattr];
}
$node.find(tag).each(function(i) {
- var url = this.getAttribute(srcattr);
+ var $srcnode = jQuery(this);
+ var url = $srcnode.attr(srcattr);
if (url) {
- if (jQuery.inArray(url, loaded) == -1) {
- // take care to <script> tags: jQuery append method script nodes
- // don't appears in the DOM (See comments on
- // http://api.jquery.com/append/), which cause undesired
- // duplicated load in our case. After trying to use bare DOM api
- // to avoid this, we switched to handle a list of already loaded
- // stuff ourselves, since bare DOM api gives bug with the
- // server-response event, since we loose control on when the
- // script is loaded (jQuery load it immediatly).
- loaded.push(url);
- jQuery(this).appendTo($head);
+ /* special handling of <script> tags: script nodes appended by jquery
+ * use uncached ajax calls and do not appear in the DOM
+ * (see comments in response to Syt on http://api.jquery.com/append/),
+ * which causes undesired duplicated loads in our case. We now handle
+ * a list of already loaded resources ourselves, since the bare DOM api
+ * gives bugs with the server-response event, and we lose control on
+ * when the script is loaded (jQuery loads it immediately). */
+ var resources = cw.ajax._listResources(url);
+ var missingResources = $.grep(resources, function(resource) {
+ return $.inArray(resource, loaded) == -1;
+ });
+ loaded.push.apply(loaded, missingResources);
+ if (missingResources.length == 1) {
+ // only one resource missing: build a node with a single resource url
+ // (maybe the browser has it in cache already)
+ $srcnode.attr(srcattr, missingResources[0]);
+ } else if (missingResources.length > 1) {
+ // several resources missing: build a node with a concatenated
+ // resources url
+ var dataurl = cw.ajax._modconcatLikeUrl(url)[1];
+ var missing_path = $.map(missingResources, function(resource) {
+ return resource.substring(dataurl.length);
+ });
+ $srcnode.attr(srcattr, dataurl + '??' + missing_path.join(','));
}
- } else {
- jQuery(this).appendTo($head);
+ // === will work if both arguments are of the same type
+ if ( $srcnode.attr('type') === 'text/javascript' ) {
+ cw.ajax.evalscripts($srcnode);
+ } else {
+ $srcnode.appendTo($head);
+ }
}
});
$node.find(jqtagfilter).remove();
--- a/web/data/cubicweb.css Wed Jun 08 15:11:45 2011 +0200
+++ b/web/data/cubicweb.css Wed Jun 08 17:08:00 2011 +0200
@@ -120,6 +120,19 @@
border: none;
}
+
+img.prevnext {
+ width: 22px;
+ height: 22px;
+}
+
+img.prevnext_nogo {
+ width: 22px;
+ height: 22px;
+ filter:alpha(opacity=25); /* IE */
+ opacity:.25;
+}
+
fieldset {
border: none;
}
--- a/web/data/cubicweb.facets.css Wed Jun 08 15:11:45 2011 +0200
+++ b/web/data/cubicweb.facets.css Wed Jun 08 17:08:00 2011 +0200
@@ -109,11 +109,25 @@
div#facetLoading {
display: none;
position: fixed;
- padding-left: 20px;
+ background: #f2f2f2;
top: 400px;
width: 200px;
- height: 100px;
+ padding: 1em;
font-size: 120%;
font-weight: bold;
text-align: center;
}
+
+div.facetTitleSelected {
+ background: url("required.png") no-repeat right top;
+}
+
+table.filter {
+ background-color: #EBE8D9;
+ border: dotted grey 1px;
+}
+
+div.facet {
+ padding: 0;
+ margin: .3em !important;
+}
--- a/web/data/cubicweb.facets.js Wed Jun 08 15:11:45 2011 +0200
+++ b/web/data/cubicweb.facets.js Wed Jun 08 17:08:00 2011 +0200
@@ -238,6 +238,18 @@
});
}
+// change css class of facets that have a value selected
+function updateFacetTitles() {
+ $('.facet').each(function() {
+ var $divTitle = $(this).find('.facetTitle');
+ var facetSelected = $(this).find('.facetValueSelected');
+ if (facetSelected.length) {
+ $divTitle.addClass('facetTitleSelected');
+ } else {
+ $divTitle.removeClass('facetTitleSelected');
+ }
+ });
+}
// we need to differentiate cases where initFacetBoxEvents is called with one
// argument or without any argument. If we use `initFacetBoxEvents` as the
@@ -245,4 +257,34 @@
// his, so we use this small anonymous function instead.
jQuery(document).ready(function() {
initFacetBoxEvents();
+ jQuery(cw).bind('facets-content-loaded', onFacetContentLoaded);
+ jQuery(cw).bind('facets-content-loading', onFacetFiltering);
+ jQuery(cw).bind('facets-content-loading', updateFacetTitles);
});
+
+function showFacetLoading(parentid) {
+ var loadingWidth = 200; // px
+ var loadingHeight = 100; // px
+ var $msg = jQuery('#facetLoading');
+ var $parent = jQuery('#' + parentid);
+ var leftPos = $parent.offset().left + ($parent.width() - loadingWidth) / 2;
+ $parent.fadeTo('normal', 0.2);
+ $msg.css('left', leftPos).show();
+}
+
+function onFacetFiltering(event, divid /* ... */) {
+ showFacetLoading(divid);
+}
+
+function onFacetContentLoaded(event, divid, rql, vid, extraparams) {
+ jQuery('#facetLoading').hide();
+}
+
+jQuery(document).ready(function () {
+ if (jQuery('div.facetBody').length) {
+ var $loadingDiv = $(DIV({id:'facetLoading'},
+ facetLoadingMsg));
+ $loadingDiv.corner();
+ $('body').append($loadingDiv);
+ }
+});
--- a/web/data/cubicweb.js Wed Jun 08 15:11:45 2011 +0200
+++ b/web/data/cubicweb.js Wed Jun 08 17:08:00 2011 +0200
@@ -308,6 +308,17 @@
},
/**
+ * returns the last element of an array-like object or undefined if empty
+ */
+ lastOf: function(array) {
+ if (array.length) {
+ return array[array.length-1];
+ } else {
+ return undefined;
+ }
+ },
+
+ /**
* .. function:: difference(lst1, lst2)
*
* returns a list containing all elements in `lst1` that are not
--- a/web/data/cubicweb.old.css Wed Jun 08 15:11:45 2011 +0200
+++ b/web/data/cubicweb.old.css Wed Jun 08 17:08:00 2011 +0200
@@ -69,6 +69,18 @@
text-align: center;
}
+img.prevnext {
+ width: 22px;
+ height: 22px;
+}
+
+img.prevnext_nogo {
+ width: 22px;
+ height: 22px;
+ filter:alpha(opacity=25); /* IE */
+ opacity:.25;
+}
+
p {
margin: 0em 0px 0.2em;
padding-top: 2px;
@@ -613,7 +625,7 @@
span.selectedSlice a:visited,
span.selectedSlice a {
- color: #000;
+ background-color: #EBE8D9;
}
/* FIXME should be moved to cubes/folder */
Binary file web/data/go_next.png has changed
Binary file web/data/go_prev.png has changed
--- a/web/facet.py Wed Jun 08 15:11:45 2011 +0200
+++ b/web/facet.py Wed Jun 08 17:08:00 2011 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -405,6 +405,10 @@
"""
raise NotImplementedError
+ @property
+ def wdgclass(self):
+ raise NotImplementedError
+
class VocabularyFacet(AbstractFacet):
"""This abstract class extend :class:`AbstractFacet` to use the
@@ -418,6 +422,10 @@
"""
needs_update = True
+ @property
+ def wdgclass(self):
+ return FacetVocabularyWidget
+
def get_widget(self):
"""Return the widget instance to use to display this facet.
@@ -427,7 +435,7 @@
vocab = self.vocabulary()
if len(vocab) <= 1:
return None
- wdg = FacetVocabularyWidget(self)
+ wdg = self.wdgclass(self)
selected = frozenset(typed_eid(eid) for eid in self._cw.list_form_param(self.__regid__))
for label, value in vocab:
if value is None:
@@ -1051,18 +1059,22 @@
self.facet = facet
self.items = []
+ def height(self):
+ return len(self.items) + 1
+
def append(self, item):
self.items.append(item)
def _render(self):
+ w = self.w
title = xml_escape(self.facet.title)
facetid = xml_escape(self.facet.__regid__)
- self.w(u'<div id="%s" class="facet">\n' % facetid)
- self.w(u'<div class="facetTitle" cubicweb:facetName="%s">%s</div>\n' %
- (xml_escape(facetid), title))
+ w(u'<div id="%s" class="facet">\n' % facetid)
+ w(u'<div class="facetTitle" cubicweb:facetName="%s">%s</div>\n' %
+ (xml_escape(facetid), title))
if self.facet.support_and():
_ = self.facet._cw._
- self.w(u'''<select name="%s" class="radio facetOperator" title="%s">
+ w(u'''<select name="%s" class="radio facetOperator" title="%s">
<option value="OR">%s</option>
<option value="AND">%s</option>
</select>''' % (facetid + '_andor', _('and/or between different values'),
@@ -1072,11 +1084,11 @@
cssclass += ' hidden'
if len(self.items) > 6:
cssclass += ' overflowed'
- self.w(u'<div class="%s">\n' % cssclass)
+ w(u'<div class="%s">\n' % cssclass)
for item in self.items:
- item.render(w=self.w)
- self.w(u'</div>\n')
- self.w(u'</div>\n')
+ item.render(w=w)
+ w(u'</div>\n')
+ w(u'</div>\n')
class FacetStringWidget(HTMLWidget):
@@ -1084,14 +1096,18 @@
self.facet = facet
self.value = None
+ def height(self):
+ return 3
+
def _render(self):
+ w = self.w
title = xml_escape(self.facet.title)
facetid = xml_escape(self.facet.__regid__)
- self.w(u'<div id="%s" class="facet">\n' % facetid)
- self.w(u'<div class="facetTitle" cubicweb:facetName="%s">%s</div>\n' %
+ w(u'<div id="%s" class="facet">\n' % facetid)
+ w(u'<div class="facetTitle" cubicweb:facetName="%s">%s</div>\n' %
(facetid, title))
- self.w(u'<input name="%s" type="text" value="%s" />\n' % (facetid, self.value or u''))
- self.w(u'</div>\n')
+ w(u'<input name="%s" type="text" value="%s" />\n' % (facetid, self.value or u''))
+ w(u'</div>\n')
class FacetRangeWidget(HTMLWidget):
@@ -1124,7 +1140,11 @@
self.minvalue = minvalue
self.maxvalue = maxvalue
+ def height(self):
+ return 3
+
def _render(self):
+ w = self.w
facet = self.facet
facet._cw.add_js('jquery.ui.js')
facet._cw.add_css('jquery.ui.css')
@@ -1138,26 +1158,26 @@
'formatter': self.formatter,
})
title = xml_escape(self.facet.title)
- self.w(u'<div id="%s" class="facet">\n' % facetid)
- self.w(u'<div class="facetTitle" cubicweb:facetName="%s">%s</div>\n' %
- (facetid, title))
+ w(u'<div id="%s" class="facet">\n' % facetid)
+ w(u'<div class="facetTitle" cubicweb:facetName="%s">%s</div>\n' %
+ (facetid, title))
cssclass = 'facetBody'
if not self.facet.start_unfolded:
cssclass += ' hidden'
- self.w(u'<div class="%s">\n' % cssclass)
- self.w(u'<span id="%s_inf"></span> - <span id="%s_sup"></span>'
- % (sliderid, sliderid))
- self.w(u'<input type="hidden" name="%s_inf" value="%s" />'
- % (facetid, self.minvalue))
- self.w(u'<input type="hidden" name="%s_sup" value="%s" />'
- % (facetid, self.maxvalue))
- self.w(u'<input type="hidden" name="min_%s_inf" value="%s" />'
- % (facetid, self.minvalue))
- self.w(u'<input type="hidden" name="max_%s_sup" value="%s" />'
- % (facetid, self.maxvalue))
- self.w(u'<div id="%s"></div>' % sliderid)
- self.w(u'</div>\n')
- self.w(u'</div>\n')
+ w(u'<div class="%s">\n' % cssclass)
+ w(u'<span id="%s_inf"></span> - <span id="%s_sup"></span>'
+ % (sliderid, sliderid))
+ w(u'<input type="hidden" name="%s_inf" value="%s" />'
+ % (facetid, self.minvalue))
+ w(u'<input type="hidden" name="%s_sup" value="%s" />'
+ % (facetid, self.maxvalue))
+ w(u'<input type="hidden" name="min_%s_inf" value="%s" />'
+ % (facetid, self.minvalue))
+ w(u'<input type="hidden" name="max_%s_sup" value="%s" />'
+ % (facetid, self.maxvalue))
+ w(u'<div id="%s"></div>' % sliderid)
+ w(u'</div>\n')
+ w(u'</div>\n')
class DateFacetRangeWidget(FacetRangeWidget):
@@ -1189,6 +1209,7 @@
self.selected = selected
def _render(self):
+ w = self.w
cssclass = 'facetValue facetCheckBox'
if self.selected:
cssclass += ' facetValueSelected'
@@ -1197,11 +1218,11 @@
else:
imgsrc = self._cw.data_url(self.unselected_img)
imgalt = self._cw._('not selected')
- self.w(u'<div class="%s" cubicweb:value="%s">\n'
- % (cssclass, xml_escape(unicode(self.value))))
- self.w(u'<img src="%s" alt="%s"/> ' % (imgsrc, imgalt))
- self.w(u'<a href="javascript: {}">%s</a>' % xml_escape(self.label))
- self.w(u'</div>')
+ w(u'<div class="%s" cubicweb:value="%s">\n'
+ % (cssclass, xml_escape(unicode(self.value))))
+ w(u'<img src="%s" alt="%s"/> ' % (imgsrc, imgalt))
+ w(u'<a href="javascript: {}">%s</a>' % xml_escape(self.label))
+ w(u'</div>')
class CheckBoxFacetWidget(HTMLWidget):
@@ -1214,10 +1235,14 @@
self.value = value
self.selected = selected
+ def height(self):
+ return 2
+
def _render(self):
+ w = self.w
title = xml_escape(self.facet.title)
facetid = xml_escape(self.facet.__regid__)
- self.w(u'<div id="%s" class="facet">\n' % facetid)
+ w(u'<div id="%s" class="facet">\n' % facetid)
cssclass = 'facetValue facetCheckBox'
if self.selected:
cssclass += ' facetValueSelected'
@@ -1226,14 +1251,14 @@
else:
imgsrc = self._cw.data_url(self.unselected_img)
imgalt = self._cw._('not selected')
- self.w(u'<div class="%s" cubicweb:value="%s">\n'
- % (cssclass, xml_escape(unicode(self.value))))
- self.w(u'<div class="facetCheckBoxWidget">')
- self.w(u'<img src="%s" alt="%s" cubicweb:unselimg="true" /> ' % (imgsrc, imgalt))
- self.w(u'<label class="facetTitle" cubicweb:facetName="%s"><a href="javascript: {}">%s</a></label>' % (facetid, title))
- self.w(u'</div>\n')
- self.w(u'</div>\n')
- self.w(u'</div>\n')
+ w(u'<div class="%s" cubicweb:value="%s">\n'
+ % (cssclass, xml_escape(unicode(self.value))))
+ w(u'<div class="facetCheckBoxWidget">')
+ w(u'<img src="%s" alt="%s" cubicweb:unselimg="true" /> ' % (imgsrc, imgalt))
+ w(u'<label class="facetTitle" cubicweb:facetName="%s"><a href="javascript: {}">%s</a></label>' % (facetid, title))
+ w(u'</div>\n')
+ w(u'</div>\n')
+ w(u'</div>\n')
class FacetSeparator(HTMLWidget):
--- a/web/form.py Wed Jun 08 15:11:45 2011 +0200
+++ b/web/form.py Wed Jun 08 17:08:00 2011 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -112,7 +112,12 @@
if value:
self.add_hidden(param, value)
if submitmsg is not None:
- self.add_hidden(u'__message', submitmsg)
+ self.set_message(submitmsg)
+
+ def set_message(self, submitmsg):
+ """sets a submitmsg if exists, using _cwmsgid mechanism """
+ cwmsgid = self._cw.set_redirect_message(submitmsg)
+ self.add_hidden(u'_cwmsgid', cwmsgid)
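A hedged sketch of the intended call site (form selection details hypothetical):

    def build_form_with_message(req, entity):
        # the message is stored server side; only the generated _cwmsgid
        # travels in the <form> as a hidden input
        form = req.vreg['forms'].select('edition', req, entity=entity)
        form.set_message(req._('entity updated'))
        return form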
@property
def root_form(self):
--- a/web/formfields.py Wed Jun 08 15:11:45 2011 +0200
+++ b/web/formfields.py Wed Jun 08 17:08:00 2011 +0200
@@ -37,6 +37,7 @@
.. autoclass:: cubicweb.web.formfields.StringField()
.. autoclass:: cubicweb.web.formfields.PasswordField()
.. autoclass:: cubicweb.web.formfields.IntField()
+.. autoclass:: cubicweb.web.formfields.BigIntField()
.. autoclass:: cubicweb.web.formfields.FloatField()
.. autoclass:: cubicweb.web.formfields.BooleanField()
.. autoclass:: cubicweb.web.formfields.DateField()
@@ -830,21 +831,25 @@
return super(EditableFileField, self)._process_form_value(form)
-class IntField(Field):
- """Use this field to edit integers (`Int` yams type). This field additionaly
- support `min` and `max` attributes that specify a minimum and/or maximum
- value for the integer (`None` meaning no boundary).
+class BigIntField(Field):
+ """Use this field to edit big integers (`BigInt` yams type). This field
+ additionally supports `min` and `max` attributes that specify a minimum and/or
+ maximum value for the integer (`None` meaning no boundary).
Unless explicitly specified, the widget for this field will be a
:class:`~cubicweb.web.formwidgets.TextInput`.
"""
+ default_text_input_size = 10
+
def __init__(self, min=None, max=None, **kwargs):
- super(IntField, self).__init__(**kwargs)
+ super(BigIntField, self).__init__(**kwargs)
self.min = min
self.max = max
+
+ def init_widget(self, widget):
+ super(BigIntField, self).init_widget(widget)
if isinstance(self.widget, fw.TextInput):
- self.widget.attrs.setdefault('size', 5)
- self.widget.attrs.setdefault('maxlength', 15)
+ self.widget.attrs.setdefault('size', self.default_text_input_size)
def _ensure_correctly_typed(self, form, value):
if isinstance(value, basestring):
@@ -858,6 +863,19 @@
return value
+class IntField(BigIntField):
+ """Use this field to edit integers (`Int` yams type). Similar to
+ :class:`~cubicweb.web.formfields.BigIntField` but sets a max length when the text
+ input widget is used (the default).
+ """
+ default_text_input_size = 5
+
+ def init_widget(self, widget):
+ super(IntField, self).init_widget(widget)
+ if isinstance(self.widget, fw.TextInput):
+ self.widget.attrs.setdefault('maxlength', 15)
+
+
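A small sketch of how the two fields now differ once their default TextInput widget is initialized (constraint values hypothetical):

    from cubicweb.web.formfields import BigIntField, IntField

    # IntField keeps the historical size=5 input capped at maxlength=15
    # after init_widget runs; BigIntField only sets a wider size=10
    amount = IntField(min=0, max=100)
    population = BigIntField(min=0)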
class BooleanField(Field):
"""Use this field to edit booleans (`Boolean` yams type).
@@ -1208,6 +1226,7 @@
'Boolean': BooleanField,
'Int': IntField,
+ 'BigInt': BigIntField,
'Float': FloatField,
'Decimal': StringField,
--- a/web/request.py Wed Jun 08 15:11:45 2011 +0200
+++ b/web/request.py Wed Jun 08 17:08:00 2011 +0200
@@ -92,7 +92,7 @@
self.uiprops = vreg.config.uiprops
self.datadir_url = vreg.config.datadir_url
# raw html headers that can be added from any view
- self.html_headers = HTMLHead()
+ self.html_headers = HTMLHead(self.datadir_url)
# form parameters
self.setup_params(form)
# dictionary that may be used to store request data that has to be
@@ -214,6 +214,12 @@
if param == '_cwmsgid':
self.set_message_id(val)
elif param == '__message':
+ warn('[3.13] the __message request parameter is deprecated (it may '
+ 'only be given to .build_url). Seeing this message usually '
+ 'means your application holds some <form> where you should '
+ 'replace the __message hidden input by a call to form.set_message, '
+ 'so the new _cwmsgid mechanism is properly used',
+ DeprecationWarning)
self.set_message(val)
else:
self.form[param] = val
@@ -256,7 +262,7 @@
"""used by AutomaticWebTest to clear html headers between tests on
the same resultset
"""
- self.html_headers = HTMLHead()
+ self.html_headers = HTMLHead(self.datadir_url)
return self
# web state helpers #######################################################
@@ -264,7 +270,7 @@
@property
def message(self):
try:
- return self.session.data.pop(self._msgid, '')
+ return self.session.data.pop(self._msgid, u'')
except AttributeError:
try:
return self._msg
@@ -283,6 +289,7 @@
return make_uid()
def set_redirect_message(self, msg):
+ # TODO - this should probably be merged with append_to_redirect_message
assert isinstance(msg, unicode)
msgid = self.redirect_message_id()
self.session.data[msgid] = msg
@@ -292,7 +299,7 @@
msgid = self.redirect_message_id()
currentmsg = self.session.data.get(msgid)
if currentmsg is not None:
- currentmsg = '%s %s' % (currentmsg, msg)
+ currentmsg = u'%s %s' % (currentmsg, msg)
else:
currentmsg = msg
self.session.data[msgid] = currentmsg
@@ -415,7 +422,8 @@
@cached # so it's written only once
def fckeditor_config(self):
- self.add_js('fckeditor/fckeditor.js')
+ fckeditor_url = self.build_url('fckeditor/fckeditor.js')
+ self.add_js(fckeditor_url, localfile=False)
self.html_headers.define_var('fcklang', self.lang)
self.html_headers.define_var('fckconfigpath',
self.data_url('cubicweb.fckcwconfig.js'))
@@ -623,6 +631,16 @@
# urls/path management ####################################################
+ def build_url(self, *args, **kwargs):
+ """return an absolute URL using params dictionary key/values as URL
+ parameters. Values are automatically URL quoted, and the
+ publishing method to use may be specified or will be guessed.
+ """
+ if '__message' in kwargs:
+ msg = kwargs.pop('__message')
+ kwargs['_cwmsgid'] = self.set_redirect_message(msg)
+ return super(CubicWebRequestBase, self).build_url(*args, **kwargs)
+
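Callers may thus keep passing __message; it is transparently converted, e.g. (a hedged sketch):

    def redirect_after_edit(req):
        # __message is popped here: the session stores the text and the
        # returned url only carries the generated _cwmsgid parameter
        return req.build_url('view', vid='index',
                             __message=req._('changes saved'))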
def url(self, includeparams=True):
"""return currently accessed url"""
return self.base_url() + self.relative_path(includeparams)
@@ -888,10 +906,20 @@
def _parse_accept_header(raw_header, value_parser=None, value_sort_key=None):
"""returns an ordered list accepted types
- returned value is a list of 2-tuple (value, score), ordered
- by score. Exact type of `value` will depend on what `value_parser`
- will reutrn. if `value_parser` is None, then the raw value, as found
- in the http header, is used.
+ :param value_parser: a function to parse a raw accept chunk. If None
+ is provided, the function defaults to identity. If a function is provided,
+ it must accept 2 parameters ``value`` and ``other_params``. ``value`` is
+ the value found before the first ';', `other_params` is a dictionary
+ built from all other chunks after this first ';'
+
+ :param value_sort_key: a key function to sort values found in the accept
+ header. This function will be passed a 3-tuple
+ (raw_value, parsed_value, score). If None is provided, the default
+ sort_key is 1./score
+
+ :return: a list of 3-tuples (raw_value, parsed_value, score),
+ ordered by score. ``parsed_value`` will be the return value of
+ ``value_parser(raw_value)``
"""
if value_sort_key is None:
value_sort_key = lambda infos: 1./infos[-1]
@@ -926,7 +954,7 @@
'text/html;level=1', `mimetypeinfo` will be ('text', '*', {'level': '1'})
"""
try:
- media_type, media_subtype = value.strip().split('/')
+ media_type, media_subtype = value.strip().split('/', 1)
except ValueError: # safety belt : '/' should always be present
media_type = value.strip()
media_subtype = '*'
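The maxsplit argument is what makes the parsing tolerant; a standalone illustration:

    # with plain split('/'), a value such as 'text/html/whatever' raised
    # ValueError on unpacking; maxsplit=1 keeps the parse lenient
    media_type, media_subtype = 'text/html/whatever'.strip().split('/', 1)
    assert (media_type, media_subtype) == ('text', 'html/whatever')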
--- a/web/test/unittest_views_basecontrollers.py Wed Jun 08 15:11:45 2011 +0200
+++ b/web/test/unittest_views_basecontrollers.py Wed Jun 08 17:08:00 2011 +0200
@@ -194,7 +194,7 @@
'use_email-object:'+emaileid: peid,
}
path, params = self.expect_redirect_publish(req, 'edit')
- email.clear_all_caches()
+ email.cw_clear_all_caches()
self.assertEqual(email.address, 'adim@logilab.fr')
@@ -238,7 +238,7 @@
}
with self.assertRaises(ValidationError) as cm:
self.ctrl_publish(req)
- self.assertEqual(cm.exception.errors, {'amount-subject': 'value must be >= 0'})
+ self.assertEqual(cm.exception.errors, {'amount-subject': 'value -10 must be >= 0'})
req = self.request(rollbackfirst=True)
req.form = {'eid': ['X'],
'__type:X': 'Salesterm',
@@ -248,7 +248,7 @@
}
with self.assertRaises(ValidationError) as cm:
self.ctrl_publish(req)
- self.assertEqual(cm.exception.errors, {'amount-subject': 'value must be <= 100'})
+ self.assertEqual(cm.exception.errors, {'amount-subject': 'value 110 must be <= 100'})
req = self.request(rollbackfirst=True)
req.form = {'eid': ['X'],
'__type:X': 'Salesterm',
--- a/web/test/unittest_views_editforms.py Wed Jun 08 15:11:45 2011 +0200
+++ b/web/test/unittest_views_editforms.py Wed Jun 08 17:08:00 2011 +0200
@@ -64,6 +64,7 @@
])
self.assertListEqual(rbc(e, 'main', 'metadata'),
[('last_login_time', 'subject'),
+ ('cw_source', 'subject'),
('creation_date', 'subject'),
('cwuri', 'subject'),
('modification_date', 'subject'),
@@ -119,7 +120,8 @@
[('nom', 'subject'),
])
self.assertListEqual(rbc(e, 'main', 'metadata'),
- [('creation_date', 'subject'),
+ [('cw_source', 'subject'),
+ ('creation_date', 'subject'),
('cwuri', 'subject'),
('modification_date', 'subject'),
('created_by', 'subject'),
--- a/web/views/basecontrollers.py Wed Jun 08 15:11:45 2011 +0200
+++ b/web/views/basecontrollers.py Wed Jun 08 17:08:00 2011 +0200
@@ -102,7 +102,7 @@
msg = self._cw._('you have been logged out')
# force base_url so on dual http/https configuration, we generate an url
# on the http version of the site
- return self._cw.build_url('view', vid='index', __message=msg,
+ return self._cw.build_url('view', vid='loggedout',
base_url=self._cw.vreg.config['base-url'])
--- a/web/views/basetemplates.py Wed Jun 08 15:11:45 2011 +0200
+++ b/web/views/basetemplates.py Wed Jun 08 17:08:00 2011 +0200
@@ -25,7 +25,7 @@
from cubicweb.appobject import objectify_selector
from cubicweb.selectors import match_kwargs, no_cnx, anonymous_user
-from cubicweb.view import View, MainTemplate, NOINDEX, NOFOLLOW
+from cubicweb.view import View, MainTemplate, NOINDEX, NOFOLLOW, StartupView
from cubicweb.utils import UStringIO
from cubicweb.schema import display_name
from cubicweb.web import component, formfields as ff, formwidgets as fw
@@ -66,19 +66,19 @@
self.wview('logform', rset=self.cw_rset, id='loginBox', klass='')
-class LoggedOutTemplate(LogInOutTemplate):
+class LoggedOutTemplate(StartupView):
__regid__ = 'loggedout'
+ __select__ = anonymous_user()
title = 'logged out'
- def content(self, w):
- # FIXME Deprecated code ?
+ def call(self):
msg = self._cw._('you have been logged out')
- w(u'<h2>%s</h2>\n' % msg)
- if self._cw.vreg.config.anonymous_user()[0]:
- indexurl = self._cw.build_url('view', vid='index', __message=msg)
- w(u'<p><a href="%s">%s</a><p>' % (
- xml_escape(indexurl),
- self._cw._('go back to the index page')))
+ if self._cw.cnx:
+ comp = self._cw.vreg['components'].select('applmessages', self._cw)
+ comp.render(w=self.w, msg=msg)
+ self.wview('index')
+ else:
+ self.w(u'<h2>%s</h2>' % msg)
@objectify_selector
--- a/web/views/cwsources.py Wed Jun 08 15:11:45 2011 +0200
+++ b/web/views/cwsources.py Wed Jun 08 17:08:00 2011 +0200
@@ -223,7 +223,8 @@
class CWSourceManagementView(StartupView):
__regid__ = 'cw.source-management'
- rql = ('Any S, ST, SN ORDERBY SN WHERE S is CWSource, S name SN, S type ST')
+ rql = ('Any S, ST, SP, SD, SN ORDERBY SN WHERE S is CWSource, S name SN, S type ST, '
+ 'S latest_retrieval SD, S parser SP')
title = _('data sources management')
def call(self, **kwargs):
@@ -234,4 +235,4 @@
self._cw.build_url('add/%s' % eschema),
self._cw._('add a CWSource')))
self.w(u'<div class="clear"></div>')
- self.wview('table', self._cw.execute(self.rql), displaycols=range(2))
+ self.wview('table', self._cw.execute(self.rql), displaycols=range(4))
--- a/web/views/facets.py Wed Jun 08 15:11:45 2011 +0200
+++ b/web/views/facets.py Wed Jun 08 17:08:00 2011 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -73,6 +73,7 @@
req = self._cw
req.add_js( self.needs_js )
req.add_css( self.needs_css)
+ req.html_headers.define_var('facetLoadingMsg', req._('facet-loading-msg'))
if self.roundcorners:
req.html_headers.add_onload('jQuery(".facet").corner("tl br 10px");')
rset, vid, divid, paginate = self._get_context()
@@ -202,6 +203,11 @@
rtype = 'has_text'
role = 'subject'
order = 0
+
+ @property
+ def wdgclass(self):
+ return facet.FacetStringWidget
+
@property
def title(self):
return self._cw._('has_text')
@@ -212,7 +218,7 @@
default implementation expects a .vocabulary method on the facet and
return a combobox displaying this vocabulary
"""
- return facet.FacetStringWidget(self)
+ return self.wdgclass(self)
def add_rql_restrictions(self):
"""add restriction for this facet into the rql syntax tree"""
--- a/web/views/navigation.py Wed Jun 08 15:11:45 2011 +0200
+++ b/web/views/navigation.py Wed Jun 08 17:08:00 2011 +0200
@@ -40,10 +40,10 @@
self.clean_params(params)
basepath = self._cw.relative_path(includeparams=False)
self.w(u'<div class="pagination">')
- self.w(u'%s ' % self.previous_link(basepath, params))
+ self.w(self.previous_link(basepath, params))
self.w(u'[ %s ]' %
u' | '.join(self.iter_page_links(basepath, params)))
- self.w(u' %s' % self.next_link(basepath, params))
+ self.w(u'  %s' % self.next_link(basepath, params))
self.w(u'</div>')
def index_display(self, start, stop):
@@ -74,12 +74,12 @@
basepath = self._cw.relative_path(includeparams=False)
w = self.w
w(u'<div class="pagination">')
- w(u'%s ' % self.previous_link(basepath, params))
+ w(self.previous_link(basepath, params))
w(u'<select onchange="javascript: document.location=this.options[this.selectedIndex].value">')
for option in self.iter_page_links(basepath, params):
w(option)
w(u'</select>')
- w(u' %s' % self.next_link(basepath, params))
+ w(u'  %s' % self.next_link(basepath, params))
w(u'</div>')
--- a/web/views/owl.py Wed Jun 08 15:11:45 2011 +0200
+++ b/web/views/owl.py Wed Jun 08 17:08:00 2011 +0200
@@ -40,6 +40,7 @@
'Boolean': 'xsd:boolean',
'Int': 'xsd:int',
+ 'BigInt': 'xsd:int',
'Float': 'xsd:float',
'Decimal' : 'xsd:decimal',
--- a/web/views/plots.py Wed Jun 08 15:11:45 2011 +0200
+++ b/web/views/plots.py Wed Jun 08 17:08:00 2011 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -33,14 +33,14 @@
"""accept result set with at least one line and two columns of result
all columns after second must be of numerical types"""
for etype in rset.description[0]:
- if etype not in ('Int', 'Float'):
+ if etype not in ('Int', 'BigInt', 'Float'):
return 0
return 1
@objectify_selector
def second_column_is_number(cls, req, rset=None, *args, **kwargs):
etype = rset.description[0][1]
- if etype not in ('Int', 'Float'):
+ if etype not in ('Int', 'BigInt', 'Float'):
return 0
return 1
@@ -50,7 +50,7 @@
if etypes[0] not in ('Date', 'Datetime', 'TZDatetime'):
return 0
for etype in etypes[1:]:
- if etype not in ('Int', 'Float'):
+ if etype not in ('Int', 'BigInt', 'Float'):
return 0
return 1
--- a/web/views/sparql.py Wed Jun 08 15:11:45 2011 +0200
+++ b/web/views/sparql.py Wed Jun 08 17:08:00 2011 +0200
@@ -80,6 +80,7 @@
'Boolean': 'boolean',
'Int': 'integer',
+ 'BigInt': 'integer',
'Float': 'float',
'Datetime': 'dateTime',
--- a/web/views/tableview.py Wed Jun 08 15:11:45 2011 +0200
+++ b/web/views/tableview.py Wed Jun 08 17:08:00 2011 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -22,7 +22,7 @@
from logilab.mtconverter import xml_escape
-from cubicweb.selectors import nonempty_rset, match_form_params
+from cubicweb.selectors import nonempty_rset
from cubicweb.utils import make_uid, json_dumps
from cubicweb.view import EntityView, AnyRsetView
from cubicweb import tags
@@ -31,6 +31,7 @@
from cubicweb.web.component import Link
from cubicweb.web.htmlwidgets import (TableWidget, TableColumn, MenuWidget,
PopupBoxMenu)
+from cubicweb.web import facet
from cubicweb.web.facet import prepare_facets_rqlst, filter_hiddens
class TableView(AnyRsetView):
@@ -42,6 +43,7 @@
__regid__ = 'table'
title = _('table')
finalview = 'final'
+ wdg_stack_size = 8
def form_filter(self, divid, displaycols, displayactions, displayfilter,
paginate, hidden=True):
@@ -72,6 +74,8 @@
w = self.w
self._cw.add_css('cubicweb.facets.css')
self._cw.add_js( ('cubicweb.ajax.js', 'cubicweb.facets.js'))
+ self._cw.html_headers.define_var('facetLoadingMsg',
+ self._cw._('facet-loading-msg'))
# drop False / None values from vidargs
vidargs = dict((k, v) for k, v in vidargs.iteritems() if v)
w(u'<form method="post" cubicweb:facetargs="%s" action="">' %
@@ -81,12 +85,36 @@
w(u'<input type="hidden" name="fromformfilter" value="1" />')
filter_hiddens(w, facets=','.join(wdg.facet.__regid__ for wdg in fwidgets),
baserql=baserql)
+ self._build_form_table(fwidgets)
+
+ def _facet_widget_sort(self, fwidgets):
+ fwidgets.sort(key=lambda x: x.height())
+
+ def _build_form_table(self, fwidgets):
+ # sort by widget height
+ w = self.w
+ self._facet_widget_sort(fwidgets)
w(u'<table class="filter">\n')
+ widget_queue = []
+ queue_size = 0
w(u'<tr>\n')
for wdg in fwidgets:
+ height = wdg.height()
+ if queue_size + height <= self.wdg_stack_size:
+ widget_queue.append(wdg)
+ queue_size += height
+ continue
w(u'<td>')
- wdg.render(w=w)
- w(u'</td>\n')
+ for queued in widget_queue:
+ queued.render(w=w)
+ w(u'</td>')
+ widget_queue = [wdg]
+ queue_size = height
+ if widget_queue:
+ w(u'<td>')
+ for queued in widget_queue:
+ queued.render(w=w)
+ w(u'</td>')
w(u'</tr>\n')
w(u'</table>\n')
w(u'</fieldset>\n')
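A worked example of the packing above: with the default wdg_stack_size = 8 and facet widgets of heights [2, 3, 3, 4], the first three widgets share one table cell (2+3+3 = 8) and the fourth opens a new one. The same logic on bare numbers:

    def pack(heights, stack_size=8):
        # mirrors _build_form_table's column packing
        columns, current, size = [], [], 0
        for height in heights:
            if size + height <= stack_size:
                current.append(height)
                size += height
            else:
                columns.append(current)
                current, size = [height], height
        if current:
            columns.append(current)
        return columns

    assert pack([2, 3, 3, 4]) == [[2, 3, 3], [4]]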
--- a/web/views/urlpublishing.py Wed Jun 08 15:11:45 2011 +0200
+++ b/web/views/urlpublishing.py Wed Jun 08 17:08:00 2011 +0200
@@ -260,9 +260,8 @@
else:
try:
action = actionsreg._select_best(actions, req, rset=rset)
+ if action is not None:
+ raise Redirect(action.url())
except RegistryException:
- continue
- else:
- # XXX avoid redirect
- raise Redirect(action.url())
+ pass # continue searching
raise PathDontMatch()
--- a/web/webconfig.py Wed Jun 08 15:11:45 2011 +0200
+++ b/web/webconfig.py Wed Jun 08 17:08:00 2011 +0200
@@ -300,19 +300,17 @@
if not (self.repairing or self.creating):
self.global_set_option('base-url', baseurl)
httpsurl = self['https-url']
+ if (self.debugmode or self.mode == 'test'):
+ datadir_path = 'data/'
+ else:
+ datadir_path = 'data/%s/' % self.instance_md5_version()
if httpsurl:
if httpsurl[-1] != '/':
httpsurl += '/'
if not self.repairing:
self.global_set_option('https-url', httpsurl)
- if self.debugmode:
- self.https_datadir_url = httpsurl + 'data/'
- else:
- self.https_datadir_url = httpsurl + 'data%s/' % self.instance_md5_version()
- if self.debugmode:
- self.datadir_url = baseurl + 'data/'
- else:
- self.datadir_url = baseurl + 'data%s/' % self.instance_md5_version()
+ self.https_datadir_url = httpsurl + datadir_path
+ self.datadir_url = baseurl + datadir_path
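In effect, a production instance now serves its resources below e.g. http://example.com/data/5f3a.../ (the md5 of the instance version, example hash made up), while debug and test modes keep the stable data/ path so edited files are picked up immediately.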
def _build_ui_properties(self):
# self.datadir_url[:-1] to remove trailing /
--- a/web/webctl.py Wed Jun 08 15:11:45 2011 +0200
+++ b/web/webctl.py Wed Jun 08 17:08:00 2011 +0200
@@ -21,9 +21,22 @@
__docformat__ = "restructuredtext en"
+import os, os.path as osp
+from shutil import copy
+
from logilab.common.shellutils import ASK
-from cubicweb.toolsutils import CommandHandler, underline_title
+from cubicweb import ExecutionError
+from cubicweb.cwctl import CWCTL
+from cubicweb.cwconfig import CubicWebConfiguration as cwcfg
+from cubicweb.toolsutils import Command, CommandHandler, underline_title
+
+
+try:
+ from os import symlink as linkdir
+except ImportError:
+ from shutil import copytree as linkdir
+
class WebCreateHandler(CommandHandler):
cmdname = 'create'
@@ -43,3 +56,57 @@
def postcreate(self, *args, **kwargs):
"""hooks called once instance's initialization has been completed"""
+
+
+class GenStaticDataDir(Command):
+ """Create a directory merging all data directory content from cubes and CW.
+ """
+ name = 'gen-static-datadir'
+ arguments = '<instance> [dirpath]'
+ min_args = 1
+ max_args = 2
+
+ options = ()
+
+ def run(self, args):
+ appid = args.pop(0)
+ config = cwcfg.config_for(appid)
+ if args:
+ dest = args[0]
+ else:
+ dest = osp.join(config.appdatahome, 'data')
+ if osp.exists(dest):
+ raise ExecutionError('Directory %s already exists. '
+ 'Remove it first.' % dest)
+ config.quick_start = True # notify this is not a regular start
+ # list all resources (no matter their order)
+ resources = set()
+ for datadir in self._datadirs(config):
+ for dirpath, dirnames, filenames in os.walk(datadir):
+ rel_dirpath = dirpath[len(datadir)+1:]
+ resources.update(osp.join(rel_dirpath, f) for f in filenames)
+ # locate resources and copy them to destination
+ for resource in resources:
+ dirname = osp.dirname(resource)
+ dest_resource = osp.join(dest, dirname)
+ if not osp.isdir(dest_resource):
+ os.makedirs(dest_resource)
+ resource_dir, resource_path = config.locate_resource(resource)
+ copy(osp.join(resource_dir, resource_path), dest_resource)
+ # handle md5 version subdirectory
+ linkdir(dest, osp.join(dest, config.instance_md5_version()))
+ print ('You can use the Apache rewrite rule below:\n'
+ 'RewriteRule ^/data/(.*) %s/$1 [L]' % dest)
+
+ def _datadirs(self, config):
+ repo = config.repository()
+ if config._cubes is None:
+ # web only config
+ config.init_cubes(repo.get_cubes())
+ for cube in repo.get_cubes():
+ cube_datadir = osp.join(cwcfg.cube_dir(cube), 'data')
+ if osp.isdir(cube_datadir):
+ yield cube_datadir
+ yield osp.join(config.shared_dir(), 'data')
+
+CWCTL.register(GenStaticDataDir)
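A hypothetical invocation: cubicweb-ctl gen-static-datadir myinstance /var/www/myinstance-static; the printed RewriteRule can then be pasted into the Apache virtual host so /data/ requests bypass the CubicWeb process entirely.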