backport stable
author Sylvain Thénault <sylvain.thenault@logilab.fr>
Thu, 03 Jun 2010 10:17:44 +0200
changeset 5655 ef903fff826d
parent 5654 8bb34548be86 (current diff)
parent 5652 904091dc4c7e (diff)
child 5658 7b9553a9db65
backport stable
cwconfig.py
etwist/server.py
server/repository.py
server/sources/__init__.py
server/sources/native.py
server/test/unittest_multisources.py
vregistry.py
web/application.py
--- a/cwconfig.py	Tue Jun 01 17:06:41 2010 +0200
+++ b/cwconfig.py	Thu Jun 03 10:17:44 2010 +0200
@@ -291,7 +291,9 @@
     name = None
     # log messages format (see logging module documentation for available keys)
     log_format = '%(asctime)s - (%(name)s) %(levelname)s: %(message)s'
-    # nor remove appobjects based on unused interface
+    # the format below can be useful to debug multi-threading issues:
+    # log_format = '%(asctime)s - [%(threadName)s] (%(name)s) %(levelname)s: %(message)s'
+    # whether to remove appobjects based on unused interfaces [???]
     cleanup_interface_sobjects = True
 
 
@@ -687,7 +689,16 @@
                 logthreshold = 'DEBUG'
             else:
                 logthreshold = self['log-threshold']
-        init_log(self.debugmode, syslog, logthreshold, logfile, self.log_format)
+        if sys.platform == 'win32':
+            # no logrotate on win32, so use the logging module's rotation facilities
+            # for now, hard-code weekly rotation every Sunday, keeping 52 weeks
+            # idea: make this configurable?
+            init_log(self.debugmode, syslog, logthreshold, logfile, self.log_format,
+                     rotation_parameters={'when': 'W6', # every Sunday
+                                          'interval': 1,
+                                          'backupCount': 52,})
+        else:
+            init_log(self.debugmode, syslog, logthreshold, logfile, self.log_format)
         # configure simpleTal logger
         logging.getLogger('simpleTAL').setLevel(logging.ERROR)
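
Aside: init_log comes from logilab.common.logging_ext, and the rotation_parameters keys match the stdlib's logging.handlers.TimedRotatingFileHandler, so presumably they are forwarded to it. A minimal standalone sketch of the same weekly rotation (the log file name is hypothetical):

    import logging
    from logging.handlers import TimedRotatingFileHandler

    # 'W6' rotates on Sundays (weekdays count from 0 = Monday); one
    # rotation per week, with 52 old files kept, i.e. about a year of logs
    handler = TimedRotatingFileHandler('instance.log', when='W6',
                                       interval=1, backupCount=52)
    handler.setFormatter(logging.Formatter(
        '%(asctime)s - (%(name)s) %(levelname)s: %(message)s'))
    logging.getLogger().addHandler(handler)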
 
--- a/etwist/server.py	Tue Jun 01 17:06:41 2010 +0200
+++ b/etwist/server.py	Thu Jun 03 10:17:44 2010 +0200
@@ -25,6 +25,7 @@
 import select
 import errno
 import traceback
+import threading
 from os.path import join
 from time import mktime
 from datetime import date, timedelta
@@ -415,8 +416,11 @@
         os.setuid(uid)
     root_resource.start_service()
     logger.info('instance started on %s', root_resource.base_url)
+    # avoid an annoying warning when not running in the main thread
+    signals = threading.currentThread().getName() == 'MainThread'
     if config['profile']:
         import cProfile
-        cProfile.runctx('reactor.run()', globals(), locals(), config['profile'])
+        cProfile.runctx('reactor.run(installSignalHandlers=%s)' % signals,
+                        globals(), locals(), config['profile'])
     else:
-        reactor.run()
+        reactor.run(installSignalHandlers=signals)
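
Aside: installSignalHandlers is a standard argument of Twisted's reactor.run(), and signal handlers can only be installed from the main thread, hence the thread-name guard. Stripped to its essence:

    import threading
    from twisted.internet import reactor

    # Twisted may only install SIGINT/SIGTERM handlers from the main
    # thread; pass False when the reactor runs in a worker thread
    signals = threading.currentThread().getName() == 'MainThread'
    reactor.run(installSignalHandlers=signals)

In the profiled branch, interpolating the bool with %s simply yields the literal True or False inside the source string handed to cProfile.runctx.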
--- a/server/repository.py	Tue Jun 01 17:06:41 2010 +0200
+++ b/server/repository.py	Thu Jun 03 10:17:44 2010 +0200
@@ -566,7 +566,7 @@
         user._cw = user.cw_rset.req = session
         user.cw_clear_relation_cache()
         self._sessions[session.id] = session
-        self.info('opened %s', session)
+        self.info('opened session %s for user %s', session.id, login)
         self.hm.call_hooks('session_open', session)
         # commit session at this point in case write operation has been done
         # during `session_open` hooks
--- a/server/sources/__init__.py	Tue Jun 01 17:06:41 2010 +0200
+++ b/server/sources/__init__.py	Thu Jun 03 10:17:44 2010 +0200
@@ -52,9 +52,11 @@
     return True
 
 class TimedCache(dict):
-    def __init__(self, ttlm, ttls=0):
-        # time to live in minutes
-        self.ttl = timedelta(0, ttlm*60 + ttls, 0)
+    def __init__(self, ttl):
+        # time to live in seconds
+        if ttl <= 0:
+            raise ValueError('TimedCache initialized with a ttl of %ss' % ttl)
+        self.ttl = timedelta(seconds=ttl)
 
     def __setitem__(self, key, value):
         dict.__setitem__(self, key, (datetime.now(), value))
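
Aside: for reference, a self-contained sketch of the TimedCache contract after this change; clear_expired and the value-unwrapping __getitem__ are assumptions about the parts of the class not shown in this hunk:

    from datetime import datetime, timedelta

    class TimedCacheSketch(dict):
        """minimal sketch: ttl in seconds, entries stamped on write"""
        def __init__(self, ttl):
            if ttl <= 0:
                raise ValueError('TimedCache initialized with a ttl of %ss' % ttl)
            self.ttl = timedelta(seconds=ttl)

        def __setitem__(self, key, value):
            dict.__setitem__(self, key, (datetime.now(), value))

        def __getitem__(self, key):
            return dict.__getitem__(self, key)[1]

        def clear_expired(self):
            now = datetime.now()
            for key, (stamp, _) in list(self.items()):
                if now - stamp > self.ttl:
                    del self[key]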
--- a/server/sources/ldapuser.py	Tue Jun 01 17:06:41 2010 +0200
+++ b/server/sources/ldapuser.py	Thu Jun 03 10:17:44 2010 +0200
@@ -33,7 +33,7 @@
 WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
 FOR A PARTICULAR PURPOSE.
 """
-
+from __future__ import division
 from base64 import b64decode
 
 from logilab.common.textutils import splitstrip
@@ -158,7 +158,7 @@
         ('cache-life-time',
          {'type' : 'time',
           'default': '2h',
-          'help': 'life time of query cache in minutes (default to two hours).',
+          'help': 'life time of the query cache (defaults to two hours).',
           'group': 'ldap-source', 'level': 3,
           }),
 
@@ -187,23 +187,28 @@
                               for o in self.user_classes]
         self._conn = None
         self._cache = {}
-        ttlm = time_validator(None, None,
-                              source_config.get('cache-life-time', 2*60))
-        self._query_cache = TimedCache(ttlm)
+        # cache-life-time is expressed in seconds
+        self._cache_ttl = time_validator(None, None,
+                              source_config.get('cache-life-time', 2*60*60))
+        self._cache_ttl = max(71, self._cache_ttl) # floor keeps sweep period >= 7s
+        self._query_cache = TimedCache(self._cache_ttl)
+        # interval is in seconds!
         self._interval = time_validator(None, None,
-                                        source_config.get('synchronization-interval',
-                                               24*60*60))
+                                    source_config.get('synchronization-interval',
+                                                      24*60*60))
 
     def reset_caches(self):
         """method called during test to reset potential source caches"""
         self._cache = {}
-        self._query_cache = TimedCache(2*60)
+        self._query_cache = TimedCache(self._cache_ttl)
 
     def init(self):
         """method called by the repository once ready to handle request"""
         self.info('ldap init')
-        self.repo.looping_task(self._interval, self.synchronize)
-        self.repo.looping_task(self._query_cache.ttl.seconds/10,
+        # enforce a minimum period of 5min 1s (the additional second is to
+        # minimize resonance effects)
+        self.repo.looping_task(max(301, self._interval), self.synchronize)
+        self.repo.looping_task(self._cache_ttl // 10,
                                self._query_cache.clear_expired)
 
     def synchronize(self):
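
Aside: the numbers above interact. Assuming time_validator (the 'time' option type from logilab.common.configuration) returns seconds, the arithmetic works out as follows; the floor of 71s on the TTL guarantees the sweep period never drops under 7 seconds:

    # worked example of the periods set up above (all values in seconds)
    cache_ttl = max(71, 2 * 60 * 60)         # '2h' default -> 7200
    sync_interval = max(301, 24 * 60 * 60)   # daily sync, floored at 5min 1s
    sweep_interval = cache_ttl // 10         # 720; never below 71 // 10 == 7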
--- a/server/sources/native.py	Tue Jun 01 17:06:41 2010 +0200
+++ b/server/sources/native.py	Thu Jun 03 10:17:44 2010 +0200
@@ -256,6 +256,7 @@
         # we need a lock to protect eid attribution function (XXX, really?
         # explain)
         self._eid_creation_lock = Lock()
+        self._eid_creation_cnx = self.get_connection()
         # (etype, attr) / storage mapping
         self._storages = {}
         # entity types that may be used by other multi-sources instances
@@ -271,6 +272,9 @@
             def pool_reset(cnx):
                 cnx.close()
             self.pool_reset = pool_reset
+        if self.dbdriver == 'sqlite':
+            self._create_eid = None
+            self.create_eid = self._create_eid_sqlite
 
     @property
     def _sqlcnx(self):
@@ -711,7 +715,7 @@
         return None
 
     def make_temp_table_name(self, table):
-        try: # XXX remove this once 
+        try: # XXX remove this once
             return self.dbhelper.temporary_table_name(table)
         except AttributeError:
             import warnings
@@ -729,7 +733,7 @@
         sql = self.dbhelper.sql_temporary_table(table, schema, False)
         self.doexec(session, sql)
 
-    def create_eid(self, session):
+    def _create_eid_sqlite(self, session):
         self._eid_creation_lock.acquire()
         try:
             for sql in self.dbhelper.sqls_increment_sequence('entities_id_seq'):
@@ -738,6 +742,51 @@
         finally:
             self._eid_creation_lock.release()
 
+
+    def create_eid(self, session):
+        self.debug('create eid')
+        # lock needed to prevent 'Connection is busy with results for
+        # another command (0)' errors with SQLServer
+        self._eid_creation_lock.acquire()
+        try:
+            return self._create_eid()
+        finally:
+            self._eid_creation_lock.release()
+
+    def _create_eid(self):
+        # internal function doing the eid creation without locking.
+        # needed for the recursive handling of disconnections (otherwise we
+        # would deadlock on self._eid_creation_lock)
+        if self._eid_creation_cnx is None:
+            self._eid_creation_cnx = self.get_connection()
+        cnx = self._eid_creation_cnx
+        cursor = cnx.cursor()
+        try:
+            for sql in self.dbhelper.sqls_increment_sequence('entities_id_seq'):
+                cursor.execute(sql)
+            eid = cursor.fetchone()[0]
+        except (self.OperationalError, self.InterfaceError):
+            # FIXME: better detection of disconnection problems
+            self.warning("trying to reconnect eid creation connection")
+            self._eid_creation_cnx = None
+            return self._create_eid()
+        except (self.DbapiError,), exc:
+            # We get this one with pyodbc and SQL Server when the connection was reset
+            if exc.args[0] == '08S01':
+                self.warning("trying to reconnect eid creation connection")
+                self._eid_creation_cnx = None
+                return self._create_eid()
+            else:
+                raise
+        except: # unexpected error: roll back, drop the connection and re-raise
+            cnx.rollback()
+            self._eid_creation_cnx = None
+            self.exception('create eid failed in an unforeseen way on SQL statement %s', sql)
+            raise
+        else:
+            cnx.commit()
+            return eid
+
+
     def add_info(self, session, entity, source, extid, complete):
         """add type and source info for an eid into the system table"""
         # begin by inserting eid/type/source/extid into the entities table
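
Aside: the split between create_eid (which takes the lock) and _create_eid (which may recurse after reconnecting) is what avoids self-deadlock on the non-reentrant Lock. A generic sketch of the same reconnect-and-retry shape, with a hypothetical connect() factory:

    class SequenceAllocator(object):
        """sketch of the reconnect-and-retry shape used by _create_eid"""
        def __init__(self, connect):
            self.connect = connect  # hypothetical factory returning a DB-API cnx
            self._cnx = None

        def next_value(self, sql, _retry=True):
            if self._cnx is None:
                self._cnx = self.connect()
            cursor = self._cnx.cursor()
            try:
                cursor.execute(sql)
                value = cursor.fetchone()[0]
            except Exception:
                # assume the connection is dead: drop it and retry once
                self._cnx = None
                if _retry:
                    return self.next_value(sql, _retry=False)
                raise
            self._cnx.commit()
            return value

Unlike this sketch, _create_eid recurses without a retry cap, relying on get_connection itself raising when the database is really gone.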
--- a/server/sources/pyrorql.py	Tue Jun 01 17:06:41 2010 +0200
+++ b/server/sources/pyrorql.py	Thu Jun 03 10:17:44 2010 +0200
@@ -144,11 +144,11 @@
                        'group': 'sources',
                        }),)
         register_persistent_options(myoptions)
-        self._query_cache = TimedCache(30)
+        self._query_cache = TimedCache(1800)
 
     def reset_caches(self):
         """method called during test to reset potential source caches"""
-        self._query_cache = TimedCache(30)
+        self._query_cache = TimedCache(1800)
 
     def last_update_time(self):
         pkey = u'sources.%s.latest-update-time' % self.uri
--- a/server/test/unittest_multisources.py	Tue Jun 01 17:06:41 2010 +0200
+++ b/server/test/unittest_multisources.py	Thu Jun 03 10:17:44 2010 +0200
@@ -24,7 +24,6 @@
 from cubicweb.devtools.testlib import CubicWebTC, refresh_repo
 from cubicweb.devtools.repotest import do_monkey_patch, undo_monkey_patch
 
-TestServerConfiguration.no_sqlite_wrap = True
 
 class TwoSourcesConfiguration(TestServerConfiguration):
     sourcefile = 'sources_multi'
@@ -48,6 +47,7 @@
 Connection_close = Connection.close
 
 def setup_module(*args):
+    TestServerConfiguration.no_sqlite_wrap = True
     # hi-jack PyroRQLSource.get_connection to access existing connection (no
     # pyro connection)
     PyroRQLSource.get_connection = lambda x: x.uri == 'extern-multi' and cnx3 or cnx2
@@ -64,6 +64,7 @@
     del repo2, cnx2, repo3, cnx3
     #del TwoSourcesTC.config.vreg
     #del TwoSourcesTC.config
+    TestServerConfiguration.no_sqlite_wrap = False
 
 class TwoSourcesTC(CubicWebTC):
     config = TwoSourcesConfiguration('data')
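
Aside: setting the flag at import time (the removed module-level line) leaked into every test module that merely imported this one; moving it into the module hooks scopes it to this module's run, since test runners call setup_module/teardown_module around it. The pattern in isolation:

    class Config(object):
        no_sqlite_wrap = False   # stands in for TestServerConfiguration

    def setup_module(*args):
        # called once before this module's tests
        Config.no_sqlite_wrap = True

    def teardown_module(*args):
        # called once after, restoring the default for later modules
        Config.no_sqlite_wrap = False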
--- a/vregistry.py	Tue Jun 01 17:06:41 2010 +0200
+++ b/vregistry.py	Thu Jun 03 10:17:44 2010 +0200
@@ -375,7 +375,7 @@
             registry.register(obj, oid=oid, clear=clear)
             self.debug('registered appobject %s in registry %s with id %s',
                        vname, registryname, oid or class_regid(obj))
-        self._loadedmods[obj.__module__][classid(obj)] = obj
+        self._loadedmods.setdefault(obj.__module__, {})[classid(obj)] = obj
 
     def unregister(self, obj, registryname=None):
         """unregister `obj` application object from the registry `registryname` or
--- a/web/application.py	Tue Jun 01 17:06:41 2010 +0200
+++ b/web/application.py	Thu Jun 03 10:17:44 2010 +0200
@@ -293,7 +293,7 @@
         # connect to the repository and get instance's schema
         self.repo = config.repository(vreg)
         if not vreg.initialized:
-            self.config.init_cubes(self.repo.get_cubes())
+            config.init_cubes(self.repo.get_cubes())
             vreg.init_properties(self.repo.properties())
             vreg.set_schema(self.repo.get_schema())
         # set the correct publish method