--- a/cwconfig.py Tue Aug 11 11:43:59 2009 +0200
+++ b/cwconfig.py Tue Aug 11 17:07:48 2009 +0200
@@ -316,7 +316,7 @@
return getattr(cls.cube_pkginfo(cube), '__recommend__', ())
@classmethod
- def expand_cubes(cls, cubes):
+ def expand_cubes(cls, cubes, with_recommends=False):
"""expand the given list of top level cubes used by adding recursivly
each cube dependencies
"""
@@ -329,6 +329,12 @@
depcube = CW_MIGRATION_MAP.get(depcube, depcube)
cubes.append(depcube)
todo.append(depcube)
+ if with_recommends:
+ for depcube in cls.cube_recommends(cube):
+ if depcube not in cubes:
+ depcube = CW_MIGRATION_MAP.get(depcube, depcube)
+ cubes.append(depcube)
+ todo.append(depcube)
return cubes
@classmethod
--- a/cwvreg.py Tue Aug 11 11:43:59 2009 +0200
+++ b/cwvreg.py Tue Aug 11 17:07:48 2009 +0200
@@ -250,8 +250,8 @@
def itervalues(self):
return (value for key, value in self.items())
- def reset(self):
- super(CubicWebVRegistry, self).reset()
+ def reset(self, path=None, force_reload=None):
+ super(CubicWebVRegistry, self).reset(path, force_reload)
self._needs_iface = {}
# two special registries, propertydefs which care all the property
# definitions, and propertyvals which contains values for those
@@ -260,13 +260,26 @@
self['propertyvalues'] = self.eprop_values = {}
for key, propdef in self.config.eproperty_definitions():
self.register_property(key, **propdef)
+ if path is not None and force_reload:
+ cleanup_sys_modules(path)
+ cubes = self.config.cubes()
+ # if the fs code uses some cubes not yet registered into the instance
+ # we should clean up sys.modules for those as well to avoid potential
+ # bad class reference problems after reloading
+ cfg = self.config
+ for cube in cfg.expand_cubes(cubes, with_recommends=True):
+ if not cube in cubes:
+ cpath = cfg.build_vregistry_cube_path([cfg.cube_dir(cube)])
+ cleanup_sys_modules(cpath)
def set_schema(self, schema):
"""set instance'schema and load application objects"""
self.schema = schema
clear_cache(self, 'rqlhelper')
# now we can load application's web objects
- self.register_objects(self.config.vregistry_path())
+ searchpath = self.config.vregistry_path()
+ self.reset(searchpath, force_reload=False)
+ self.register_objects(searchpath, force_reload=False)
# map lowered entity type names to their actual name
self.case_insensitive_etypes = {}
for etype in self.schema.entities():
@@ -302,13 +315,14 @@
def register_objects(self, path, force_reload=None):
"""overriden to remove objects requiring a missing interface"""
+ if force_reload is None:
+ force_reload = self.config.mode == 'dev'
try:
self._register_objects(path, force_reload)
except RegistryOutOfDate:
CW_EVENT_MANAGER.emit('before-registry-reload')
# modification detected, reset and reload
- self.reset()
- cleanup_sys_modules(path)
+ self.reset(path, force_reload)
self._register_objects(path, force_reload)
CW_EVENT_MANAGER.emit('after-registry-reload')
--- a/server/__init__.py Tue Aug 11 11:43:59 2009 +0200
+++ b/server/__init__.py Tue Aug 11 17:07:48 2009 +0200
@@ -24,11 +24,12 @@
# server-side debugging #########################################################
# server debugging flags. They may be combined using binary operators.
-DBG_NONE = 0 # no debug information
-DBG_RQL = 1 # rql execution information
-DBG_SQL = 2 # executed sql
-DBG_REPO = 4 # repository events
-DBG_MORE = 8 # repository events
+DBG_NONE = 0 # no debug information
+DBG_RQL = 1 # rql execution information
+DBG_SQL = 2 # executed sql
+DBG_REPO = 4 # repository events
+DBG_MS = 8 # multi-sources
+DBG_MORE = 16 # repository events
# current debug mode
DEBUG = 0
--- a/server/msplanner.py Tue Aug 11 11:43:59 2009 +0200
+++ b/server/msplanner.py Tue Aug 11 17:07:48 2009 +0200
@@ -267,7 +267,7 @@
self._conflicts = []
if rqlhelper is not None: # else test
self._insert_identity_variable = rqlhelper._annotator.rewrite_shared_optional
- if server.DEBUG:
+ if server.DEBUG & server.DBG_MS:
print 'sourcesterms:'
self._debug_sourcesterms()
@@ -1023,7 +1023,7 @@
the rqlst should not be tagged at this point
"""
- if server.DEBUG:
+ if server.DEBUG & server.DBG_MS:
print '-'*80
print 'PLANNING', rqlst
for select in rqlst.children:
@@ -1040,7 +1040,7 @@
ppis = [PartPlanInformation(plan, select, self.rqlhelper)
for select in rqlst.children]
steps = self._union_plan(plan, rqlst, ppis)
- if server.DEBUG:
+ if server.DEBUG & server.DBG_MS:
from pprint import pprint
for step in plan.steps:
pprint(step.test_repr())
@@ -1235,7 +1235,7 @@
return rqlst
def filter(self, sources, terms, rqlst, solindices, needsel, final):
- if server.DEBUG:
+ if server.DEBUG & server.DBG_MS:
print 'filter', final and 'final' or '', sources, terms, rqlst, solindices, needsel
newroot = Select()
self.sources = sorted(sources)
@@ -1329,7 +1329,7 @@
elif ored:
newroot.remove_node(rel)
add_types_restriction(self.schema, rqlst, newroot, solutions)
- if server.DEBUG:
+ if server.DEBUG & server.DBG_MS:
print '--->', newroot
return newroot, self.insertedvars
--- a/server/pool.py Tue Aug 11 11:43:59 2009 +0200
+++ b/server/pool.py Tue Aug 11 17:07:48 2009 +0200
@@ -33,6 +33,17 @@
self.source_cnxs['system'] = self.source_cnxs[sources[0].uri]
self._cursors = {}
+ def __getitem__(self, uri):
+ """subscription notation provides access to sources' cursors"""
+ try:
+ cursor = self._cursors[uri]
+ except KeyError:
+ cursor = self.source_cnxs[uri][1].cursor()
+ if cursor is not None:
+ # None possible on sources without cursor support such as ldap
+ self._cursors[uri] = cursor
+ return cursor
+
def commit(self):
"""commit the current transaction for this user"""
# FIXME: what happends if a commit fail
@@ -77,22 +88,11 @@
for source, cnx in self.source_cnxs.values():
source.pool_reset(cnx)
- def __getitem__(self, uri):
- """subscription notation provide access to sources'cursors"""
- try:
- cursor = self._cursors[uri]
- except KeyError:
- cursor = self.source_cnxs[uri][1].cursor()
- if cursor is not None:
- # None possible on sources without cursor support such as ldap
- self._cursors[uri] = cursor
- return cursor
-
def sources(self):
"""return the source objects handled by this pool"""
# implementation details of flying insert requires the system source
# first
- yield self.source_cnxs['system']
+ yield self.source_cnxs['system'][0]
for uri, (source, cursor) in self.source_cnxs.items():
if uri == 'system':
continue
@@ -107,11 +107,17 @@
"""return the connection on the source object with the given uri"""
return self.source_cnxs[uid][1]
- def reconnect(self, source):
- """reopen a connection for this source"""
- source.info('trying to reconnect')
- self.source_cnxs[source.uri] = (source, source.get_connection())
- del self._cursors[source.uri]
+ def reconnect(self, source=None):
+ """reopen a connection for this source or all sources if none specified
+ """
+ if source is None:
+ sources = self.sources()
+ else:
+ sources = (source,)
+ for source in sources:
+ source.info('trying to reconnect')
+ self.source_cnxs[source.uri] = (source, source.get_connection())
+ self._cursors.pop(source.uri, None)
def check_connections(self):
for source, cnx in self.source_cnxs.itervalues():
--- a/server/session.py Tue Aug 11 11:43:59 2009 +0200
+++ b/server/session.py Tue Aug 11 17:07:48 2009 +0200
@@ -208,9 +208,9 @@
"""connections pool, set according to transaction mode for each query"""
return getattr(self._threaddata, 'pool', None)
- def set_pool(self):
+ def set_pool(self, checkclosed=True):
"""the session need a pool to execute some queries"""
- if self._closed:
+ if checkclosed and self._closed:
raise Exception('try to set pool on a closed session')
if self.pool is None:
# get pool first to avoid race-condition
@@ -335,7 +335,7 @@
csession = ChildSession(self)
self._threaddata.childsession = csession
# need shared pool set
- self.set_pool()
+ self.set_pool(checkclosed=False)
return csession
def unsafe_execute(self, rql, kwargs=None, eid_key=None, build_descr=True,
--- a/server/sources/extlite.py Tue Aug 11 11:43:59 2009 +0200
+++ b/server/sources/extlite.py Tue Aug 11 17:07:48 2009 +0200
@@ -35,13 +35,13 @@
def commit(self):
if self._cnx is not None:
- if server.DEBUG & server.DBG_SQL:
+ if server.DEBUG & (server.DBG_SQL | server.DBG_RQL):
print 'sql cnx COMMIT', self._cnx
self._cnx.commit()
def rollback(self):
if self._cnx is not None:
- if server.DEBUG & server.DBG_SQL:
+ if server.DEBUG & (server.DBG_SQL | server.DBG_RQL):
print 'sql cnx ROLLBACK', self._cnx
self._cnx.rollback()
--- a/server/sources/ldapuser.py Tue Aug 11 11:43:59 2009 +0200
+++ b/server/sources/ldapuser.py Tue Aug 11 17:07:48 2009 +0200
@@ -181,6 +181,7 @@
def reset_caches(self):
"""method called during test to reset potential source caches"""
+ self._cache = {}
self._query_cache = TimedCache(2*60)
def init(self):
--- a/server/sources/pyrorql.py Tue Aug 11 11:43:59 2009 +0200
+++ b/server/sources/pyrorql.py Tue Aug 11 17:07:48 2009 +0200
@@ -127,6 +127,10 @@
register_persistent_options(myoptions)
self._query_cache = TimedCache(30)
+ def reset_caches(self):
+ """method called during test to reset potential source caches"""
+ self._query_cache = TimedCache(30)
+
def last_update_time(self):
pkey = u'sources.%s.latest-update-time' % self.uri
rql = 'Any V WHERE X is CWProperty, X value V, X pkey %(k)s'
--- a/vregistry.py Tue Aug 11 11:43:59 2009 +0200
+++ b/vregistry.py Tue Aug 11 17:07:48 2009 +0200
@@ -206,7 +206,7 @@
super(VRegistry, self).__init__()
self.config = config
- def reset(self, force_reload=None):
+ def reset(self, path=None, force_reload=None):
self.clear()
self._lastmodifs = {}
@@ -318,14 +318,7 @@
self._loadedmods = {}
return filemods
- def register_objects(self, path, force_reload=None, extrapath=None):
- if force_reload is None:
- force_reload = self.config.mode == 'dev'
- elif not force_reload:
- # force_reload == False usually mean modules have been reloaded
- # by another connection, so we want to update the registry
- # content even if there has been no module content modification
- self.reset()
+ def register_objects(self, path, force_reload, extrapath=None):
# need to clean sys.path this to avoid import confusion pb (i.e.
# having the same module loaded as 'cubicweb.web.views' subpackage and
# as views' or 'web.views' subpackage