[repository api] definitely kill usage of the word 'pool' to refer to the set of connections used by a session
Also document the session's data storage and some other internals.
Hopefully things will get clearer.
Closes #1684860: vocabulary confusion in repository code: notion of 'pool'
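For upgraders, a minimal sketch of the renamed session API (all names below are defined by this diff; the old spellings remain as deprecated [3.13] wrappers):

    session.set_pool()     # deprecated [3.13] spelling of ...
    session.set_cnxset()   # ... acquire a connections set for the transaction

    session.reset_pool()   # deprecated [3.13] spelling of ...
    session.free_cnxset()  # ... release it once the transaction no longer needs it

    session.pool           # deprecated [3.13] attribute alias of ...
    session.cnxset         # ... the ConnectionsSet currently in use, or None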
--- a/dataimport.py Thu May 19 10:36:26 2011 +0200
+++ b/dataimport.py Thu May 19 10:53:11 2011 +0200
@@ -445,14 +445,14 @@
ObjectStore.__init__(self)
if session is None:
sys.exit('please provide a session or run this script with cubicweb-ctl shell and pass cnx as session')
- if not hasattr(session, 'set_pool'):
+ if not hasattr(session, 'set_cnxset'):
# connection
cnx = session
session = session.request()
- session.set_pool = lambda : None
+ session.set_cnxset = lambda : None
commit = commit or cnx.commit
else:
- session.set_pool()
+ session.set_cnxset()
self.session = session
self._commit = commit or session.commit
@@ -462,7 +462,7 @@
def commit(self):
txuuid = self._commit()
- self.session.set_pool()
+ self.session.set_cnxset()
return txuuid
def rql(self, *args):
--- a/devtools/__init__.py Thu May 19 10:36:26 2011 +0200
+++ b/devtools/__init__.py Thu May 19 10:53:11 2011 +0200
@@ -93,7 +93,7 @@
""" Idea: this is less costly than a full re-creation of the repo object.
off:
* session are closed,
- * pools are closed
+ * cnxsets are closed
* system source is shutdown
"""
if not repo._needs_refresh:
@@ -104,8 +104,8 @@
repo.close(sessionid)
except BadConnectionId: #this is strange ? thread issue ?
print 'XXX unknown session', sessionid
- for pool in repo.pools:
- pool.close(True)
+ for cnxset in repo.cnxsets:
+ cnxset.close(True)
repo.system_source.shutdown()
repo._needs_refresh = True
repo._has_started = False
@@ -113,12 +113,12 @@
def turn_repo_on(repo):
"""Idea: this is less costly than a full re-creation of the repo object.
on:
- * pools are connected
+ * cnxsets are connected
* cache are cleared
"""
if repo._needs_refresh:
- for pool in repo.pools:
- pool.reconnect()
+ for cnxset in repo.cnxsets:
+ cnxset.reconnect()
repo._type_source_cache = {}
repo._extid_cache = {}
repo.querier._rql_cache = {}
@@ -477,12 +477,11 @@
repo = self.get_repo(startup=True)
cnx = self.get_cnx()
session = repo._sessions[cnx.sessionid]
- session.set_pool()
+ session.set_cnxset()
_commit = session.commit
- def always_pooled_commit():
- _commit()
- session.set_pool()
- session.commit = always_pooled_commit
+ def keep_cnxset_commit():
+ _commit(free_cnxset=False)
+ session.commit = keep_cnxset_commit
pre_setup_func(session, self.config)
session.commit()
cnx.close()
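The rewritten helper above leans on the new `free_cnxset` keyword rather than re-acquiring a set after each commit; the two forms are equivalent in effect (sketch):

    # before: commit, then grab a fresh connections set
    _commit()
    session.set_cnxset()

    # after: keep the current connections set across the commit
    _commit(free_cnxset=False)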
--- a/devtools/fake.py Thu May 19 10:36:26 2011 +0200
+++ b/devtools/fake.py Thu May 19 10:53:11 2011 +0200
@@ -146,7 +146,7 @@
if vreg is None:
vreg = CubicWebVRegistry(FakeConfig(), initlog=False)
self.vreg = vreg
- self.pool = FakePool()
+ self.cnxset = FakeConnectionsSet()
self.user = user or FakeUser()
self.is_internal_session = False
self.transaction_data = {}
@@ -210,6 +210,6 @@
self.uri = uri
-class FakePool(object):
+class FakeConnectionsSet(object):
def source(self, uri):
return FakeSource(uri)
--- a/devtools/repotest.py Thu May 19 10:36:26 2011 +0200
+++ b/devtools/repotest.py Thu May 19 10:53:11 2011 +0200
@@ -205,7 +205,7 @@
self.ueid = self.session.user.eid
assert self.ueid != -1
self.repo._type_source_cache = {} # clear cache
- self.pool = self.session.set_pool()
+ self.cnxset = self.session.set_cnxset()
self.maxeid = self.get_max_eid()
do_monkey_patch()
self._dumb_sessions = []
@@ -213,7 +213,7 @@
def get_max_eid(self):
return self.session.execute('Any MAX(X)')[0][0]
def cleanup(self):
- self.session.set_pool()
+ self.session.set_cnxset()
self.session.execute('DELETE Any X WHERE X eid > %s' % self.maxeid)
def tearDown(self):
@@ -225,7 +225,7 @@
for session in self._dumb_sessions:
session.rollback()
session.close()
- self.repo._free_pool(self.pool)
+ self.repo._free_cnxset(self.cnxset)
assert self.session.user.eid != -1
def set_debug(self, debug):
@@ -263,7 +263,7 @@
u = self.repo._build_user(self.session, self.session.user.eid)
u._groups = set(groups)
s = Session(u, self.repo)
- s._threaddata.pool = self.pool
+ s._threaddata.cnxset = self.cnxset
s._threaddata.ctx_count = 1
# register session to ensure it gets closed
self._dumb_sessions.append(s)
@@ -274,7 +274,7 @@
def commit(self):
self.session.commit()
- self.session.set_pool()
+ self.session.set_cnxset()
class BasePlannerTC(BaseQuerierTC):
@@ -288,7 +288,7 @@
# XXX source_defs
self.o = self.repo.querier
self.session = self.repo._sessions.values()[0]
- self.pool = self.session.set_pool()
+ self.cnxset = self.session.set_cnxset()
self.schema = self.o.schema
self.sources = self.o._repo.sources
self.system = self.sources[-1]
@@ -312,7 +312,7 @@
del self.repo.sources_by_uri[source.uri]
undo_monkey_patch()
for session in self._dumb_sessions:
- session._threaddata.pool = None
+ session._threaddata.cnxset = None
session.close()
def _prepare_plan(self, rql, kwargs=None):
--- a/devtools/testlib.py Thu May 19 10:36:26 2011 +0200
+++ b/devtools/testlib.py Thu May 19 10:53:11 2011 +0200
@@ -274,7 +274,7 @@
def session(self):
"""return current server side session (using default manager account)"""
session = self.repo._sessions[self.cnx.sessionid]
- session.set_pool()
+ session.set_cnxset()
return session
@property
@@ -458,7 +458,7 @@
try:
return self.cnx.commit()
finally:
- self.session.set_pool() # ensure pool still set after commit
+ self.session.set_cnxset() # ensure cnxset still set after commit
@nocoverage
def rollback(self):
@@ -467,7 +467,7 @@
except dbapi.ProgrammingError:
pass # connection closed
finally:
- self.session.set_pool() # ensure pool still set after commit
+ self.session.set_cnxset() # ensure cnxset still set after rollback
# # server side db api #######################################################
@@ -475,7 +475,7 @@
if eid_key is not None:
warn('[3.8] eid_key is deprecated, you can safely remove this argument',
DeprecationWarning, stacklevel=2)
- self.session.set_pool()
+ self.session.set_cnxset()
return self.session.execute(rql, args)
# other utilities #########################################################
--- a/entities/test/unittest_wfobjs.py Thu May 19 10:36:26 2011 +0200
+++ b/entities/test/unittest_wfobjs.py Thu May 19 10:53:11 2011 +0200
@@ -557,7 +557,7 @@
def setUp(self):
CubicWebTC.setUp(self)
self.wf = self.session.user.cw_adapt_to('IWorkflowable').current_workflow
- self.session.set_pool()
+ self.session.set_cnxset()
self.s_activated = self.wf.state_by_name('activated').eid
self.s_deactivated = self.wf.state_by_name('deactivated').eid
self.s_dummy = self.wf.add_state(u'dummy').eid
@@ -629,13 +629,13 @@
iworkflowable = user.cw_adapt_to('IWorkflowable')
iworkflowable.fire_transition('deactivate')
cnx.commit()
- session.set_pool()
+ session.set_cnxset()
with self.assertRaises(ValidationError) as cm:
iworkflowable.fire_transition('deactivate')
self.assertEqual(self._cleanup_msg(cm.exception.errors['by_transition-subject']),
u"transition isn't allowed from")
cnx.rollback()
- session.set_pool()
+ session.set_cnxset()
# get back now
iworkflowable.fire_transition('activate')
cnx.commit()
--- a/hooks/__init__.py Thu May 19 10:36:26 2011 +0200
+++ b/hooks/__init__.py Thu May 19 10:53:11 2011 +0200
@@ -67,7 +67,7 @@
except Exception, exc:
session.exception('while trying to update feed %s', source)
session.rollback()
- session.set_pool()
+ session.set_cnxset()
finally:
session.close()
self.repo.looping_task(60, update_feeds, self.repo)
--- a/hooks/syncschema.py Thu May 19 10:36:26 2011 +0200
+++ b/hooks/syncschema.py Thu May 19 10:53:11 2011 +0200
@@ -92,7 +92,7 @@
# create index before alter table which may expectedly fail during test
# (sqlite) while index creation should never fail (test for index existence
# is done by the dbhelper)
- session.pool.source('system').create_index(session, table, column)
+ session.cnxset.source('system').create_index(session, table, column)
session.info('added index on %s(%s)', table, column)
@@ -252,7 +252,7 @@
description=entity.description)
eschema = schema.add_entity_type(etype)
# create the necessary table
- tablesql = y2sql.eschema2sql(session.pool.source('system').dbhelper,
+ tablesql = y2sql.eschema2sql(session.cnxset.source('system').dbhelper,
eschema, prefix=SQL_PREFIX)
for sql in tablesql.split(';'):
if sql.strip():
@@ -289,7 +289,7 @@
self.session.vreg.schema.rename_entity_type(oldname, newname)
# we need sql to operate physical changes on the system database
sqlexec = self.session.system_sql
- dbhelper= self.session.pool.source('system').dbhelper
+ dbhelper= self.session.cnxset.source('system').dbhelper
sql = dbhelper.sql_rename_table(SQL_PREFIX+oldname,
SQL_PREFIX+newname)
sqlexec(sql)
@@ -433,7 +433,7 @@
# update the in-memory schema first
rdefdef = self.init_rdef(**props)
# then make necessary changes to the system source database
- syssource = session.pool.source('system')
+ syssource = session.cnxset.source('system')
attrtype = y2sql.type_from_constraints(
syssource.dbhelper, rdefdef.object, rdefdef.constraints)
# XXX should be moved somehow into lgdb: sqlite doesn't support to
@@ -603,7 +603,7 @@
self.oldvalues = dict( (attr, getattr(rdef, attr)) for attr in self.values)
rdef.update(self.values)
# then make necessary changes to the system source database
- syssource = session.pool.source('system')
+ syssource = session.cnxset.source('system')
if 'indexed' in self.values:
syssource.update_rdef_indexed(session, rdef)
self.indexed_changed = True
@@ -621,7 +621,7 @@
# revert changes on in memory schema
self.rdef.update(self.oldvalues)
# revert changes on database
- syssource = self.session.pool.source('system')
+ syssource = self.session.cnxset.source('system')
if self.indexed_changed:
syssource.update_rdef_indexed(self.session, self.rdef)
if self.null_allowed_changed:
@@ -649,7 +649,7 @@
rdef.constraints.remove(self.oldcstr)
# then update database: alter the physical schema on size/unique
# constraint changes
- syssource = session.pool.source('system')
+ syssource = session.cnxset.source('system')
cstrtype = self.oldcstr.type()
if cstrtype == 'SizeConstraint':
syssource.update_rdef_column(session, rdef)
@@ -665,7 +665,7 @@
if self.oldcstr is not None:
self.rdef.constraints.append(self.oldcstr)
# revert changes on database
- syssource = self.session.pool.source('system')
+ syssource = self.session.cnxset.source('system')
if self.size_cstr_changed:
syssource.update_rdef_column(self.session, self.rdef)
if self.unique_changed:
@@ -696,7 +696,7 @@
rdef.constraints.append(newcstr)
# then update database: alter the physical schema on size/unique
# constraint changes
- syssource = session.pool.source('system')
+ syssource = session.cnxset.source('system')
if cstrtype == 'SizeConstraint' and (oldcstr is None or
oldcstr.max != newcstr.max):
syssource.update_rdef_column(session, rdef)
@@ -713,7 +713,7 @@
prefix = SQL_PREFIX
table = '%s%s' % (prefix, self.entity.constraint_of[0].name)
cols = ['%s%s' % (prefix, r.name) for r in self.entity.relations]
- dbhelper= session.pool.source('system').dbhelper
+ dbhelper= session.cnxset.source('system').dbhelper
sqls = dbhelper.sqls_create_multicol_unique_index(table, cols)
for sql in sqls:
session.system_sql(sql)
@@ -733,7 +733,7 @@
session = self.session
prefix = SQL_PREFIX
table = '%s%s' % (prefix, self.entity.type)
- dbhelper= session.pool.source('system').dbhelper
+ dbhelper= session.cnxset.source('system').dbhelper
cols = ['%s%s' % (prefix, c) for c in self.cols]
sqls = dbhelper.sqls_drop_multicol_unique_index(table, cols)
for sql in sqls:
@@ -782,7 +782,7 @@
"""
def precommit_event(self):
- """the observed connections pool has been commited"""
+ """the observed connections.cnxset has been commited"""
try:
erschema = self.session.vreg.schema.schema_by_eid(self.eid)
except KeyError:
@@ -811,7 +811,7 @@
"""
def precommit_event(self):
- """the observed connections pool has been commited"""
+ """the observed connections set has been commited"""
try:
erschema = self.session.vreg.schema.schema_by_eid(self.eid)
except KeyError:
@@ -1223,7 +1223,7 @@
source.fti_index_entities(session, [container])
if to_reindex:
# Transaction has already been committed
- session.pool.commit()
+ session.cnxset.commit()
--- a/hooks/syncsession.py Thu May 19 10:36:26 2011 +0200
+++ b/hooks/syncsession.py Thu May 19 10:53:11 2011 +0200
@@ -56,7 +56,7 @@
class _DeleteGroupOp(_GroupOperation):
"""synchronize user when a in_group relation has been deleted"""
def postcommit_event(self):
- """the observed connections pool has been commited"""
+ """the observed connections set has been commited"""
groups = self.cnxuser.groups
try:
groups.remove(self.group)
@@ -67,7 +67,7 @@
class _AddGroupOp(_GroupOperation):
"""synchronize user when a in_group relation has been added"""
def postcommit_event(self):
- """the observed connections pool has been commited"""
+ """the observed connections set has been commited"""
groups = self.cnxuser.groups
if self.group in groups:
self.warning('user %s already in group %s', self.cnxuser,
@@ -97,7 +97,7 @@
hook.Operation.__init__(self, session)
def postcommit_event(self):
- """the observed connections pool has been commited"""
+ """the observed connections set has been commited"""
try:
self.session.repo.close(self.cnxid)
except BadConnectionId:
@@ -122,7 +122,7 @@
"""a user's custom properties has been deleted"""
def postcommit_event(self):
- """the observed connections pool has been commited"""
+ """the observed connections set has been commited"""
try:
del self.cwpropdict[self.key]
except KeyError:
@@ -133,7 +133,7 @@
"""a user's custom properties has been added/changed"""
def postcommit_event(self):
- """the observed connections pool has been commited"""
+ """the observed connections set has been commited"""
self.cwpropdict[self.key] = self.value
@@ -141,7 +141,7 @@
"""a user's custom properties has been added/changed"""
def postcommit_event(self):
- """the observed connections pool has been commited"""
+ """the observed connections set has been commited"""
cwprop = self.cwprop
if not cwprop.for_user:
self.session.vreg['propertyvalues'][cwprop.pkey] = cwprop.value
--- a/hooks/test/unittest_syncschema.py Thu May 19 10:36:26 2011 +0200
+++ b/hooks/test/unittest_syncschema.py Thu May 19 10:53:11 2011 +0200
@@ -36,9 +36,9 @@
self.__class__.schema_eids = schema_eids_idx(self.repo.schema)
def index_exists(self, etype, attr, unique=False):
- self.session.set_pool()
- dbhelper = self.session.pool.source('system').dbhelper
- sqlcursor = self.session.pool['system']
+ self.session.set_cnxset()
+ dbhelper = self.session.cnxset.source('system').dbhelper
+ sqlcursor = self.session.cnxset['system']
return dbhelper.index_exists(sqlcursor, SQL_PREFIX + etype, SQL_PREFIX + attr, unique=unique)
def _set_perms(self, eid):
@@ -57,9 +57,9 @@
def test_base(self):
schema = self.repo.schema
- self.session.set_pool()
- dbhelper = self.session.pool.source('system').dbhelper
- sqlcursor = self.session.pool['system']
+ self.session.set_cnxset()
+ dbhelper = self.session.cnxset.source('system').dbhelper
+ sqlcursor = self.session.cnxset['system']
self.failIf(schema.has_entity('Societe2'))
self.failIf(schema.has_entity('concerne2'))
# schema should be update on insertion (after commit)
@@ -170,9 +170,9 @@
# schema modification hooks tests #########################################
def test_uninline_relation(self):
- self.session.set_pool()
- dbhelper = self.session.pool.source('system').dbhelper
- sqlcursor = self.session.pool['system']
+ self.session.set_cnxset()
+ dbhelper = self.session.cnxset.source('system').dbhelper
+ sqlcursor = self.session.cnxset['system']
self.failUnless(self.schema['state_of'].inlined)
try:
self.execute('SET X inlined FALSE WHERE X name "state_of"')
@@ -195,9 +195,9 @@
self.assertEqual(len(rset), 2)
def test_indexed_change(self):
- self.session.set_pool()
- dbhelper = self.session.pool.source('system').dbhelper
- sqlcursor = self.session.pool['system']
+ self.session.set_cnxset()
+ dbhelper = self.session.cnxset.source('system').dbhelper
+ sqlcursor = self.session.cnxset['system']
try:
self.execute('SET X indexed FALSE WHERE X relation_type R, R name "name"')
self.failUnless(self.schema['name'].rdef('Workflow', 'String').indexed)
@@ -214,9 +214,9 @@
self.failUnless(self.index_exists('Workflow', 'name'))
def test_unique_change(self):
- self.session.set_pool()
- dbhelper = self.session.pool.source('system').dbhelper
- sqlcursor = self.session.pool['system']
+ self.session.set_cnxset()
+ dbhelper = self.session.cnxset.source('system').dbhelper
+ sqlcursor = self.session.cnxset['system']
try:
self.execute('INSERT CWConstraint X: X cstrtype CT, DEF constrained_by X '
'WHERE CT name "UniqueConstraint", DEF relation_type RT, DEF from_entity E,'
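The three-line preamble repeated in each test above is a natural candidate for a small helper; a possible factoring (a suggestion only, not part of this patch):

    def _sql_setup(self):
        # hypothetical helper gathering the repeated cnxset preamble
        self.session.set_cnxset()
        dbhelper = self.session.cnxset.source('system').dbhelper
        sqlcursor = self.session.cnxset['system']
        return dbhelper, sqlcursor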
--- a/misc/migration/bootstrapmigration_repository.py Thu May 19 10:36:26 2011 +0200
+++ b/misc/migration/bootstrapmigration_repository.py Thu May 19 10:53:11 2011 +0200
@@ -49,7 +49,7 @@
elif applcubicwebversion < (3, 6, 0) and cubicwebversion >= (3, 6, 0):
CSTRMAP = dict(rql('Any T, X WHERE X is CWConstraintType, X name T',
ask_confirm=False))
- session.set_pool()
+ session.set_cnxset()
permsdict = ss.deserialize_ertype_permissions(session)
with hooks_control(session, session.HOOKS_ALLOW_ALL, 'integrity'):
--- a/misc/scripts/drop_external_entities.py Thu May 19 10:36:26 2011 +0200
+++ b/misc/scripts/drop_external_entities.py Thu May 19 10:53:11 2011 +0200
@@ -3,7 +3,7 @@
sql("DELETE FROM entities WHERE type='Int'")
-ecnx = session.pool.connection(source)
+ecnx = session.cnxset.connection(source)
for e in rql('Any X WHERE X cw_source S, S name %(name)s', {'name': source}).entities():
meta = e.cw_metainformation()
assert meta['source']['uri'] == source
--- a/server/__init__.py Thu May 19 10:36:26 2011 +0200
+++ b/server/__init__.py Thu May 19 10:53:11 2011 +0200
@@ -230,7 +230,7 @@
for path in reversed(paths):
mhandler.exec_event_script('pre%s' % event, path)
# enter instance'schema into the database
- session.set_pool()
+ session.set_cnxset()
serialize_schema(session, schema)
# execute cubicweb's post<event> script
mhandler.exec_event_script('post%s' % event)
--- a/server/checkintegrity.py Thu May 19 10:36:26 2011 +0200
+++ b/server/checkintegrity.py Thu May 19 10:53:11 2011 +0200
@@ -101,7 +101,7 @@
# deactivate modification_date hook since we don't want them
# to be updated due to the reindexation
repo = session.repo
- cursor = session.pool['system']
+ cursor = session.cnxset['system']
dbhelper = session.repo.system_source.dbhelper
if not dbhelper.has_fti_table(cursor):
print 'no text index table'
@@ -356,7 +356,7 @@
using given user and password to locally connect to the repository
(no running cubicweb server needed)
"""
- session = repo._get_session(cnx.sessionid, setpool=True)
+ session = repo._get_session(cnx.sessionid, setcnxset=True)
# yo, launch checks
if checks:
eids_cache = {}
@@ -372,6 +372,6 @@
print 'WARNING: Diagnostic run, nothing has been corrected'
if reindex:
cnx.rollback()
- session.set_pool()
+ session.set_cnxset()
reindex_entities(repo.schema, session, withpb=withpb)
cnx.commit()
--- a/server/hook.py Thu May 19 10:36:26 2011 +0200
+++ b/server/hook.py Thu May 19 10:53:11 2011 +0200
@@ -730,8 +730,8 @@
operation. These keyword arguments will be accessible as attributes from the
operation instance.
- An operation is triggered on connections pool events related to
- commit / rollback transations. Possible events are:
+ An operation is triggered on connections set events related to commit /
+ rollback transactions. Possible events are:
* `precommit`:
@@ -805,7 +805,7 @@
getattr(self, event)()
def precommit_event(self):
- """the observed connections pool is preparing a commit"""
+ """the observed connections set is preparing a commit"""
def revertprecommit_event(self):
"""an error went when pre-commiting this operation or a later one
@@ -815,14 +815,13 @@
"""
def rollback_event(self):
- """the observed connections pool has been rollbacked
+ """the observed connections set has been rollbacked
- do nothing by default, the operation will just be removed from the pool
- operation list
+ do nothing by default
"""
def postcommit_event(self):
- """the observed connections pool has committed"""
+ """the observed connections set has committed"""
@property
@deprecated('[3.6] use self.session.user')
@@ -1098,7 +1097,7 @@
data_key = 'neweids'
def rollback_event(self):
- """the observed connections pool has been rollbacked,
+ """the observed connections set has been rollbacked,
remove inserted eid from repository type/source cache
"""
try:
@@ -1112,7 +1111,7 @@
"""
data_key = 'pendingeids'
def postcommit_event(self):
- """the observed connections pool has been rollbacked,
+ """the observed connections set has been rollbacked,
remove inserted eid from repository type/source cache
"""
try:
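To make the events documented above concrete, here is a minimal hedged sketch of an operation reacting to connections set events (the class name and `msg` attribute are hypothetical; keyword arguments given to an operation become instance attributes):

    from cubicweb.server.hook import Operation

    class _LogTxOp(Operation):
        """hypothetical operation illustrating the three events"""
        def precommit_event(self):
            # the observed connections set is preparing a commit
            self.session.info('about to commit: %s', self.msg)
        def postcommit_event(self):
            # the observed connections set has committed
            self.session.info('committed: %s', self.msg)
        def rollback_event(self):
            # the observed connections set has been rolled back
            self.session.info('rolled back: %s', self.msg)

    # from within a hook: _LogTxOp(self._cw, msg='something to track')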
--- a/server/migractions.py Thu May 19 10:36:26 2011 +0200
+++ b/server/migractions.py Thu May 19 10:53:11 2011 +0200
@@ -201,7 +201,6 @@
versions = repo.get_versions()
for cube, version in versions.iteritems():
version_file.write('%s %s\n' % (cube, version))
-
if not failed:
bkup = tarfile.open(backupfile, 'w|gz')
for filename in os.listdir(tmpdir):
@@ -242,7 +241,7 @@
written_format = format_file.readline().strip()
if written_format in ('portable', 'native'):
format = written_format
- self.config.open_connections_pools = False
+ self.config.init_cnxset_pool = False
repo = self.repo_connect()
for source in repo.sources:
if systemonly and source.uri != 'system':
@@ -255,7 +254,7 @@
raise SystemExit(1)
shutil.rmtree(tmpdir)
# call hooks
- repo.open_connections_pools()
+ repo.init_cnxset_pool()
repo.hm.call_hooks('server_restore', repo=repo, timestamp=backupfile)
print '-> database restored.'
@@ -288,7 +287,7 @@
except (KeyboardInterrupt, EOFError):
print 'aborting...'
sys.exit(0)
- self.session.keep_pool_mode('transaction')
+ self.session.keep_cnxset_mode('transaction')
self.session.data['rebuild-infered'] = False
return self._cnx
@@ -296,10 +295,10 @@
def session(self):
if self.config is not None:
session = self.repo._get_session(self.cnx.sessionid)
- if session.pool is None:
+ if session.cnxset is None:
session.set_read_security(False)
session.set_write_security(False)
- session.set_pool()
+ session.set_cnxset()
return session
# no access to session on remote instance
return None
@@ -308,13 +307,13 @@
if hasattr(self, '_cnx'):
self._cnx.commit()
if self.session:
- self.session.set_pool()
+ self.session.set_cnxset()
def rollback(self):
if hasattr(self, '_cnx'):
self._cnx.rollback()
if self.session:
- self.session.set_pool()
+ self.session.set_cnxset()
def rqlexecall(self, rqliter, ask_confirm=False):
for rql, kwargs in rqliter:
@@ -1360,7 +1359,7 @@
def _cw(self):
session = self.session
if session is not None:
- session.set_pool()
+ session.set_cnxset()
return session
return self.cnx.request()
--- a/server/pool.py Thu May 19 10:36:26 2011 +0200
+++ b/server/pool.py Thu May 19 10:53:11 2011 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -15,19 +15,18 @@
#
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
-"""CubicWeb server connections pool : the repository has a limited number of
-connections pools, each of them dealing with a set of connections on each source
-used by the repository. A connections pools (`ConnectionsPool`) is an
-abstraction for a group of connection to each source.
+"""CubicWeb server connections set : the repository has a limited number of
+:class:`ConnectionsSet` (defined in configuration, default to 4). Each of them
+hold a connection for each source used by the repository.
"""
__docformat__ = "restructuredtext en"
import sys
-class ConnectionsPool(object):
+class ConnectionsSet(object):
"""handle connections on a set of sources, at some point associated to a
- user session
+ :class:`Session`
"""
def __init__(self, sources):
@@ -81,9 +80,9 @@
self.reconnect(source)
def close(self, i_know_what_i_do=False):
- """close all connections in the pool"""
+ """close all connections in the set"""
if i_know_what_i_do is not True: # unexpected closing safety belt
- raise RuntimeError('pool shouldn\'t be closed')
+ raise RuntimeError('connections set shouldn\'t be closed')
for cu in self._cursors.values():
try:
cu.close()
@@ -97,17 +96,17 @@
# internals ###############################################################
- def pool_set(self):
- """pool is being set"""
+ def cnxset_set(self):
+ """connections set is being set on a session"""
self.check_connections()
- def pool_reset(self):
- """pool is being reseted"""
+ def cnxset_freed(self):
+ """connections set is being freed from a session"""
for source, cnx in self.source_cnxs.values():
- source.pool_reset(cnx)
+ source.cnxset_freed(cnx)
def sources(self):
- """return the source objects handled by this pool"""
+ """return the source objects handled by this connections set"""
# implementation details of flying insert requires the system source
# first
yield self.source_cnxs['system'][0]
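Outside the repository you normally reach a connections set through the session, but the life cycle the repository drives boils down to this (a sketch using internal APIs shown elsewhere in this patch):

    cnxset = repo._get_cnxset()           # blocks until a pooled set is free
    try:
        cnxset.cnxset_set()               # validate/refresh its connections
        cu = cnxset['system']             # cursor on the system source
        cnx = cnxset.connection('system')
    finally:
        cnxset.cnxset_freed()             # notify sources it is detached
        repo._free_cnxset(cnxset)         # put it back into the queue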
--- a/server/querier.py Thu May 19 10:36:26 2011 +0200
+++ b/server/querier.py Thu May 19 10:53:11 2011 +0200
@@ -169,7 +169,7 @@
# session executing the query
self.session = session
# quick reference to the system source
- self.syssource = session.pool.source('system')
+ self.syssource = session.cnxset.source('system')
# execution steps
self.steps = []
# index of temporary tables created during execution
@@ -734,8 +734,8 @@
# transaction must been rollbacked
#
# notes:
- # * we should not reset the pool here, since we don't want the
- # session to loose its pool during processing
+ # * we should not reset the connections set here, since we don't want the
+ # session to lose it during processing
# * don't rollback if we're in the commit process, will be handled
# by the session
if session.commit_state is None:
--- a/server/repository.py Thu May 19 10:36:26 2011 +0200
+++ b/server/repository.py Thu May 19 10:53:11 2011 +0200
@@ -164,9 +164,9 @@
self._type_source_cache = {}
# cache (extid, source uri) -> eid
self._extid_cache = {}
- # open some connections pools
- if config.open_connections_pools:
- self.open_connections_pools()
+ # open some connections sets
+ if config.init_cnxset_pool:
+ self.init_cnxset_pool()
@onevent('after-registry-reload', self)
def fix_user_classes(self):
usercls = self.vreg['etypes'].etype_class('CWUser')
@@ -174,10 +174,10 @@
if not isinstance(session.user, InternalManager):
session.user.__class__ = usercls
- def open_connections_pools(self):
+ def init_cnxset_pool(self):
config = self.config
- self._available_pools = Queue.Queue()
- self._available_pools.put_nowait(pool.ConnectionsPool(self.sources))
+ self._cnxsets_pool = Queue.Queue()
+ self._cnxsets_pool.put_nowait(pool.ConnectionsSet(self.sources))
if config.quick_start:
# quick start, usually only to get a minimal repository to get cubes
# information (eg dump/restore/...)
@@ -219,14 +219,14 @@
# configurate tsearch according to postgres version
for source in self.sources:
source.init_creating()
- # close initialization pool and reopen fresh ones for proper
+ # close the initialization connections set and reopen fresh ones for proper
# initialization now that we know cubes
- self._get_pool().close(True)
- # list of available pools (we can't iterate on Queue instance)
- self.pools = []
+ self._get_cnxset().close(True)
+ # list of available cnxsets (we can't iterate on a Queue instance)
+ self.cnxsets = []
for i in xrange(config['connections-pool-size']):
- self.pools.append(pool.ConnectionsPool(self.sources))
- self._available_pools.put_nowait(self.pools[-1])
+ self.cnxsets.append(pool.ConnectionsSet(self.sources))
+ self._cnxsets_pool.put_nowait(self.cnxsets[-1])
if config.quick_start:
config.init_cubes(self.get_cubes())
self.hm = hook.HooksManager(self.vreg)
@@ -249,7 +249,7 @@
self.sources_by_eid[sourceent.eid] = self.system_source
self.system_source.init(True, sourceent)
continue
- self.add_source(sourceent, add_to_pools=False)
+ self.add_source(sourceent, add_to_cnxsets=False)
finally:
session.close()
@@ -258,7 +258,7 @@
'can_cross_relation', 'rel_type_sources'):
clear_cache(self, cache)
- def add_source(self, sourceent, add_to_pools=True):
+ def add_source(self, sourceent, add_to_cnxsets=True):
source = self.get_source(sourceent.type, sourceent.name,
sourceent.host_config, sourceent.eid)
self.sources_by_eid[sourceent.eid] = source
@@ -266,15 +266,15 @@
if self.config.source_enabled(source):
# call source's init method to complete their initialisation if
# needed (for instance looking for persistent configuration using an
- # internal session, which is not possible until pools have been
+ # internal session, which is not possible until connections sets have been
# initialized)
source.init(True, sourceent)
if not source.copy_based_source:
self.sources.append(source)
self.querier.set_planner()
- if add_to_pools:
- for pool in self.pools:
- pool.add_source(source)
+ if add_to_cnxsets:
+ for cnxset in self.cnxsets:
+ cnxset.add_source(source)
else:
source.init(False, sourceent)
self._clear_planning_caches()
@@ -285,8 +285,8 @@
if self.config.source_enabled(source) and not source.copy_based_source:
self.sources.remove(source)
self.querier.set_planner()
- for pool in self.pools:
- pool.remove_source(source)
+ for cnxset in self.cnxsets:
+ cnxset.remove_source(source)
self._clear_planning_caches()
def get_source(self, type, uri, source_config, eid=None):
@@ -373,25 +373,25 @@
t.start()
#@locked
- def _get_pool(self):
+ def _get_cnxset(self):
try:
- return self._available_pools.get(True, timeout=5)
+ return self._cnxsets_pool.get(True, timeout=5)
except Queue.Empty:
- raise Exception('no pool available after 5 secs, probably either a '
+ raise Exception('no connections set available after 5 secs, probably either a '
'bug in code (too many uncommitted/rolled back '
'connections) or too much load on the server (in '
'which case you can try to set a bigger '
- 'connections pools size)')
+ 'connections pool size)')
- def _free_pool(self, pool):
- self._available_pools.put_nowait(pool)
+ def _free_cnxset(self, cnxset):
+ self._cnxsets_pool.put_nowait(cnxset)
def pinfo(self):
- # XXX: session.pool is accessed from a local storage, would be interesting
- # to see if there is a pool set in any thread specific data)
- return '%s: %s (%s)' % (self._available_pools.qsize(),
+ # XXX: session.cnxset is accessed from a local storage, would be interesting
+ # to see if there is a cnxset set in any thread specific data)
+ return '%s: %s (%s)' % (self._cnxsets_pool.qsize(),
','.join(session.user.login for session in self._sessions.values()
- if session.pool),
+ if session.cnxset),
threading.currentThread())
def shutdown(self):
"""called on server stop event to properly close opened sessions and
@@ -414,12 +414,12 @@
or self.config.quick_start):
self.hm.call_hooks('server_shutdown', repo=self)
self.close_sessions()
- while not self._available_pools.empty():
- pool = self._available_pools.get_nowait()
+ while not self._cnxsets_pool.empty():
+ cnxset = self._cnxsets_pool.get_nowait()
try:
- pool.close(True)
+ cnxset.close(True)
except:
- self.exception('error while closing %s' % pool)
+ self.exception('error while closing %s' % cnxset)
continue
if self.pyro_registered:
if self._use_pyrons():
@@ -501,7 +501,7 @@
results['nb_open_sessions'] = len(self._sessions)
results['nb_active_threads'] = threading.activeCount()
results['looping_tasks'] = ', '.join(str(t) for t in self._looping_tasks)
- results['available_pools'] = self._available_pools.qsize()
+ results['available_cnxsets'] = self._cnxsets_pool.qsize()
results['threads'] = ', '.join(sorted(str(t) for t in threading.enumerate()))
return results
@@ -543,9 +543,9 @@
_, sourceuri, extid = self.type_and_source_from_eid(foreid)
if sourceuri == 'system':
return self.config[option]
- pool = self._get_pool()
+ cnxset = self._get_cnxset()
try:
- cnx = pool.connection(sourceuri)
+ cnx = cnxset.connection(sourceuri)
# needed to check connection is valid and usable by the current
# thread
newcnx = self.sources_by_uri[sourceuri].check_connection(cnx)
@@ -553,7 +553,7 @@
cnx = newcnx
return cnx.get_option_value(option, extid)
finally:
- self._free_pool(pool)
+ self._free_cnxset(cnxset)
@cached
def get_versions(self, checkversions=False):
@@ -726,7 +726,7 @@
* build_descr is a flag indicating if the description should be
built on select queries
"""
- session = self._get_session(sessionid, setpool=True, txid=txid)
+ session = self._get_session(sessionid, setcnxset=True, txid=txid)
try:
try:
rset = self.querier.execute(session, rqlstring, args,
@@ -752,21 +752,21 @@
self.exception('unexpected error while executing %s with %s', rqlstring, args)
raise
finally:
- session.reset_pool()
+ session.free_cnxset()
def describe(self, sessionid, eid, txid=None):
"""return a tuple (type, source, extid) for the entity with id <eid>"""
- session = self._get_session(sessionid, setpool=True, txid=txid)
+ session = self._get_session(sessionid, setcnxset=True, txid=txid)
try:
return self.type_and_source_from_eid(eid, session)
finally:
- session.reset_pool()
+ session.free_cnxset()
def check_session(self, sessionid):
"""raise `BadConnectionId` if the connection is no more valid, else
return its latest activity timestamp.
"""
- return self._get_session(sessionid, setpool=False).timestamp
+ return self._get_session(sessionid, setcnxset=False).timestamp
def get_shared_data(self, sessionid, key, default=None, pop=False, txdata=False):
"""return value associated to key in the session's data dictionary or
@@ -777,7 +777,7 @@
If key isn't defined in the dictionnary, value specified by the
`default` argument will be returned.
"""
- session = self._get_session(sessionid, setpool=False)
+ session = self._get_session(sessionid, setcnxset=False)
return session.get_shared_data(key, default, pop, txdata)
def set_shared_data(self, sessionid, key, value, txdata=False):
@@ -787,7 +787,7 @@
transaction's data which are cleared on commit/rollback of the current
transaction.
"""
- session = self._get_session(sessionid, setpool=False)
+ session = self._get_session(sessionid, setcnxset=False)
session.set_shared_data(key, value, txdata)
def commit(self, sessionid, txid=None):
@@ -816,10 +816,10 @@
def close(self, sessionid, txid=None, checkshuttingdown=True):
"""close the session with the given id"""
- session = self._get_session(sessionid, setpool=True, txid=txid,
+ session = self._get_session(sessionid, setcnxset=True, txid=txid,
checkshuttingdown=checkshuttingdown)
# operations uncommitted before close are rolled back before the hook is called
- session.rollback(reset_pool=False)
+ session.rollback(free_cnxset=False)
self.hm.call_hooks('session_close', session)
# commit session at this point in case write operation has been done
# during `session_close` hooks
@@ -834,7 +834,7 @@
* update user information on each user's request (i.e. groups and
custom properties)
"""
- session = self._get_session(sessionid, setpool=False)
+ session = self._get_session(sessionid, setcnxset=False)
if props is not None:
self.set_session_props(sessionid, props)
user = session.user
@@ -846,43 +846,43 @@
* update user information on each user's request (i.e. groups and
custom properties)
"""
- session = self._get_session(sessionid, setpool=False)
+ session = self._get_session(sessionid, setcnxset=False)
for prop, value in props.items():
session.change_property(prop, value)
def undoable_transactions(self, sessionid, ueid=None, txid=None,
**actionfilters):
"""See :class:`cubicweb.dbapi.Connection.undoable_transactions`"""
- session = self._get_session(sessionid, setpool=True, txid=txid)
+ session = self._get_session(sessionid, setcnxset=True, txid=txid)
try:
return self.system_source.undoable_transactions(session, ueid,
**actionfilters)
finally:
- session.reset_pool()
+ session.free_cnxset()
def transaction_info(self, sessionid, txuuid, txid=None):
"""See :class:`cubicweb.dbapi.Connection.transaction_info`"""
- session = self._get_session(sessionid, setpool=True, txid=txid)
+ session = self._get_session(sessionid, setcnxset=True, txid=txid)
try:
return self.system_source.tx_info(session, txuuid)
finally:
- session.reset_pool()
+ session.free_cnxset()
def transaction_actions(self, sessionid, txuuid, public=True, txid=None):
"""See :class:`cubicweb.dbapi.Connection.transaction_actions`"""
- session = self._get_session(sessionid, setpool=True, txid=txid)
+ session = self._get_session(sessionid, setcnxset=True, txid=txid)
try:
return self.system_source.tx_actions(session, txuuid, public)
finally:
- session.reset_pool()
+ session.free_cnxset()
def undo_transaction(self, sessionid, txuuid, txid=None):
"""See :class:`cubicweb.dbapi.Connection.undo_transaction`"""
- session = self._get_session(sessionid, setpool=True, txid=txid)
+ session = self._get_session(sessionid, setcnxset=True, txid=txid)
try:
return self.system_source.undo_transaction(session, txuuid)
finally:
- session.reset_pool()
+ session.free_cnxset()
# public (inter-repository) interface #####################################
@@ -934,14 +934,14 @@
"""return a dbapi like connection/cursor using internal user which
have every rights on the repository. You'll *have to* commit/rollback
or close (rollback implicitly) the session once the job's done, else
- you'll leak connections pool up to the time where no more pool is
+ you'll leak connections sets up to the point where no more are
available, causing irremediable freeze...
"""
session = InternalSession(self, cnxprops)
- session.set_pool()
+ session.set_cnxset()
return session
- def _get_session(self, sessionid, setpool=False, txid=None,
+ def _get_session(self, sessionid, setcnxset=False, txid=None,
checkshuttingdown=True):
"""return the user associated to the given session identifier"""
if checkshuttingdown and self.shutting_down:
@@ -950,9 +950,9 @@
session = self._sessions[sessionid]
except KeyError:
raise BadConnectionId('No such session %s' % sessionid)
- if setpool:
- session.set_tx_data(txid) # must be done before set_pool
- session.set_pool()
+ if setcnxset:
+ session.set_tx_data(txid) # must be done before set_cnxset
+ session.set_cnxset()
return session
# data sources handling ###################################################
@@ -970,15 +970,15 @@
except KeyError:
if session is None:
session = self.internal_session()
- reset_pool = True
+ free_cnxset = True
else:
- reset_pool = False
+ free_cnxset = False
try:
etype, uri, extid = self.system_source.eid_type_source(session,
eid)
finally:
- if reset_pool:
- session.reset_pool()
+ if free_cnxset:
+ session.free_cnxset()
self._type_source_cache[eid] = (etype, uri, extid)
if uri != 'system':
self._extid_cache[(extid, uri)] = eid
@@ -1039,16 +1039,16 @@
return self._extid_cache[cachekey]
except KeyError:
pass
- reset_pool = False
+ free_cnxset = False
if session is None:
session = self.internal_session()
- reset_pool = True
+ free_cnxset = True
eid = self.system_source.extid2eid(session, uri, extid)
if eid is not None:
self._extid_cache[cachekey] = eid
self._type_source_cache[eid] = (etype, uri, extid)
- if reset_pool:
- session.reset_pool()
+ if free_cnxset:
+ session.free_cnxset()
return eid
if not insert:
return
@@ -1060,7 +1060,7 @@
# processing a commit, we have to use another one
if not session.is_internal_session:
session = self.internal_session()
- reset_pool = True
+ free_cnxset = True
try:
eid = self.system_source.create_eid(session)
self._extid_cache[cachekey] = eid
@@ -1074,10 +1074,10 @@
source.after_entity_insertion(session, extid, entity, sourceparams)
if source.should_call_hooks:
self.hm.call_hooks('after_add_entity', session, entity=entity)
- session.commit(reset_pool)
+ session.commit(free_cnxset)
return eid
except:
- session.rollback(reset_pool)
+ session.rollback(free_cnxset)
raise
def add_info(self, session, entity, source, extid=None, complete=True):
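Nearly every public repository method above follows the same acquire/release discipline; in isolation it reads (sketch; `work` is a placeholder):

    session = self._get_session(sessionid, setcnxset=True, txid=txid)
    try:
        return work(session)
    finally:
        session.free_cnxset()   # hand the connections set back for reuse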
--- a/server/schemaserial.py Thu May 19 10:36:26 2011 +0200
+++ b/server/schemaserial.py Thu May 19 10:53:11 2011 +0200
@@ -88,7 +88,7 @@
repo = session.repo
dbhelper = repo.system_source.dbhelper
# XXX bw compat (3.6 migration)
- sqlcu = session.pool['system']
+ sqlcu = session.cnxset['system']
sqlcu.execute("SELECT * FROM cw_CWRType WHERE cw_name='symetric'")
if sqlcu.fetchall():
sql = dbhelper.sql_rename_col('cw_CWRType', 'cw_symetric', 'cw_symmetric',
--- a/server/serverconfig.py Thu May 19 10:36:26 2011 +0200
+++ b/server/serverconfig.py Thu May 19 10:53:11 2011 +0200
@@ -130,7 +130,7 @@
('connections-pool-size',
{'type' : 'int',
'default': 4,
- 'help': 'size of the connections pools. Each source supporting multiple \
+ 'help': 'size of the connections pool. Each source supporting multiple \
connections will have this number of opened connections.',
'group': 'main', 'level': 3,
}),
@@ -209,9 +209,9 @@
}),
) + CubicWebConfiguration.options)
- # should we open connections pools (eg connect to sources). This is usually
- # necessary...
- open_connections_pools = True
+ # should we init the connections pool (eg connect to sources). This is
+ # usually necessary...
+ init_cnxset_pool = True
# read the schema from the database
read_instance_schema = True
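Note the user-facing option keeps its historical name, so existing configuration files are unaffected; in an instance's all-in-one.conf it still reads, e.g. (assuming the usual [MAIN] section):

    [MAIN]
    connections-pool-size=4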
--- a/server/serverctl.py Thu May 19 10:36:26 2011 +0200
+++ b/server/serverctl.py Thu May 19 10:53:11 2011 +0200
@@ -970,7 +970,7 @@
appid = args[0]
config = ServerConfiguration.config_for(appid)
repo, cnx = repo_cnx(config)
- session = repo._get_session(cnx.sessionid, setpool=True)
+ session = repo._get_session(cnx.sessionid, setcnxset=True)
reindex_entities(repo.schema, session)
cnx.commit()
--- a/server/session.py Thu May 19 10:36:26 2011 +0200
+++ b/server/session.py Thu May 19 10:53:11 2011 +0200
@@ -130,8 +130,76 @@
class Session(RequestSessionBase):
- """tie session id, user, connections pool and other session data all
- together
+ """Repository usersession, tie a session id, user, connections set and
+ other session data all together.
+
+ About session storage / transactions
+ ------------------------------------
+
+ Here is a description of internal session attributes. Besides :attr:`data`
+ and :attr:`transaction_data`, you should not have to use the attributes
+ described here directly; use the higher level APIs instead.
+
+ :attr:`data` is a dictionary containing shared data, used to communicate
+ extra information between the client and the repository
+
+ :attr:`_tx_data` is a dictionary of :class:`TransactionData` instances, one
+ for each running transaction. The key is the transaction id. By default
+ the transaction id is the thread name but it can be otherwise (per dbapi
+ cursor for instance, or per thread name *from another process*).
+
+ :attr:`__threaddata` is a thread local storage whose `txdata` attribute
+ refers to the proper instance of :class:`TransactionData` according to the
+ transaction.
+
+ :attr:`_threads_in_transaction` is a set of (thread, connections set)
+ referencing threads that currently hold a connections set for the session.
+
+ You should not have to use either :attr:`_tx_data` or :attr:`__threaddata`,
+ simply access transaction data transparently through the :attr:`_threaddata`
+ property. Also, you usually don't have to access it directly since current
+ transaction's data may be accessed/modified through properties / methods:
+
+ :attr:`transaction_data`, similarly to :attr:`data`, is a dictionary
+ containing some shared data that should be cleared at the end of the
+ transaction. Hooks and operations may put arbitrary data in there, and
+ this may also be used as a communication channel between the client and
+ the repository.
+
+ :attr:`cnxset`, the connections set to use to execute queries on sources.
+ During a transaction, the connections set may be freed so that it may be
+ used by another session as long as no writing is done. This means we can
+ have multiple sessions with a reasonably low connections set pool size.
+
+ :attr:`mode`, string telling the connections set handling mode, may be one
+ of 'read' (connections set may be freed), 'write' (some write was done in
+ the connections set, it can't be freed before end of the transaction),
+ 'transaction' (we want to keep the connections set during all the
+ transaction, with or without writing)
+
+ :attr:`pending_operations`, ordered list of operations to be processed on
+ commit/rollback
+
+ :attr:`commit_state`, describing the transaction commit state, may be one
+ of None (not yet committing), 'precommit' (calling precommit event on
+ operations), 'postcommit' (calling postcommit event on operations),
+ 'uncommitable' (some :exc:`ValidationError` or :exc:`Unauthorized` error
+ has been raised during the transaction and so it must be rolled back).
+
+ :attr:`read_security` and :attr:`write_security`, boolean flags telling if
+ read/write security is currently activated.
+
+ :attr:`hooks_mode`, may be either `HOOKS_ALLOW_ALL` or `HOOKS_DENY_ALL`.
+
+ :attr:`enabled_hook_categories`, when :attr:`hooks_mode` is
+ `HOOKS_DENY_ALL`, this set contains hooks categories that are enabled.
+
+ :attr:`disabled_hook_categories`, when :attr:`hooks_mode` is
+ `HOOKS_ALLOW_ALL`, this set contains hooks categories that are disabled.
+
+
+ :attr:`running_dbapi_query`, boolean flag telling if the executing query
+ is coming from a dbapi connection or is a query from within the repository
"""
is_internal_session = False
@@ -190,8 +258,8 @@
"""return a fake request/session using specified user"""
session = Session(user, self.repo)
threaddata = session._threaddata
- threaddata.pool = self.pool
- # we attributed a pool, need to update ctx_count else it will be freed
+ threaddata.cnxset = self.cnxset
+ # we attached a connections set, need to update ctx_count else it will be freed
# while undesired
threaddata.ctx_count = 1
# share pending_operations, else operation added in the hi-jacked
@@ -335,14 +403,14 @@
"""return a sql cursor on the system database"""
if sql.split(None, 1)[0].upper() != 'SELECT':
self.mode = 'write'
- source = self.pool.source('system')
+ source = self.cnxset.source('system')
try:
return source.doexec(self, sql, args, rollback=rollback_on_failure)
except (source.OperationalError, source.InterfaceError):
if not rollback_on_failure:
raise
source.warning("trying to reconnect")
- self.pool.reconnect(source)
+ self.cnxset.reconnect(source)
return source.doexec(self, sql, args, rollback=rollback_on_failure)
def set_language(self, language):
@@ -606,19 +674,19 @@
# connection management ###################################################
- def keep_pool_mode(self, mode):
- """set pool_mode, e.g. how the session will keep its pool:
+ def keep_cnxset_mode(self, mode):
+ """set `mode`, e.g. how the session will keep its connections set:
- * if mode == 'write', the pool is freed after each ready query, but kept
- until the transaction's end (eg commit or rollback) when a write query
- is detected (eg INSERT/SET/DELETE queries)
+ * if mode == 'write', the connections set is freed after each read
+ query, but kept until the transaction's end (eg commit or rollback)
+ when a write query is detected (eg INSERT/SET/DELETE queries)
- * if mode == 'transaction', the pool is only freed after the
+ * if mode == 'transaction', the connections set is only freed after the
transaction's end
- notice that a repository has a limited set of pools, and a session has to
- wait for a free pool to run any rql query (unless it already has a pool
- set).
+ notice that a repository has a limited number of connections sets, and a
+ session has to wait for a free connections set to run any rql query
+ (unless it already has one).
"""
assert mode in ('transaction', 'write')
if mode == 'transaction':
@@ -641,57 +709,57 @@
commit_state = property(get_commit_state, set_commit_state)
@property
- def pool(self):
- """connections pool, set according to transaction mode for each query"""
+ def cnxset(self):
+ """connections set, set according to transaction mode for each query"""
if self._closed:
- self.reset_pool(True)
- raise Exception('try to access pool on a closed session')
- return getattr(self._threaddata, 'pool', None)
+ self.free_cnxset(True)
+ raise Exception('try to access connections set on a closed session')
+ return getattr(self._threaddata, 'cnxset', None)
- def set_pool(self):
- """the session need a pool to execute some queries"""
+ def set_cnxset(self):
+ """the session need a connections set to execute some queries"""
with self._closed_lock:
if self._closed:
- self.reset_pool(True)
- raise Exception('try to set pool on a closed session')
- if self.pool is None:
- # get pool first to avoid race-condition
- self._threaddata.pool = pool = self.repo._get_pool()
+ self.free_cnxset(True)
+ raise Exception('try to set connections set on a closed session')
+ if self.cnxset is None:
+ # get connections set first to avoid race-condition
+ self._threaddata.cnxset = cnxset = self.repo._get_cnxset()
self._threaddata.ctx_count += 1
try:
- pool.pool_set()
+ cnxset.cnxset_set()
except:
- self._threaddata.pool = None
- self.repo._free_pool(pool)
+ self._threaddata.cnxset = None
+ self.repo._free_cnxset(cnxset)
raise
self._threads_in_transaction.add(
- (threading.currentThread(), pool) )
- return self._threaddata.pool
+ (threading.currentThread(), cnxset) )
+ return self._threaddata.cnxset
- def _free_thread_pool(self, thread, pool, force_close=False):
+ def _free_thread_cnxset(self, thread, cnxset, force_close=False):
try:
- self._threads_in_transaction.remove( (thread, pool) )
+ self._threads_in_transaction.remove( (thread, cnxset) )
except KeyError:
- # race condition on pool freeing (freed by commit or rollback vs
+ # race condition on cnxset freeing (freed by commit or rollback vs
# close)
pass
else:
if force_close:
- pool.reconnect()
+ cnxset.reconnect()
else:
- pool.pool_reset()
- # free pool once everything is done to avoid race-condition
- self.repo._free_pool(pool)
+ cnxset.cnxset_freed()
+ # free cnxset once everything is done to avoid race-condition
+ self.repo._free_cnxset(cnxset)
- def reset_pool(self, ignoremode=False):
- """the session is no longer using its pool, at least for some time"""
- # pool may be none if no operation has been done since last commit
+ def free_cnxset(self, ignoremode=False):
+ """the session is no longer using its connections set, at least for some time"""
+ # cnxset may be None if no operation has been done since last commit
# or rollback
- pool = getattr(self._threaddata, 'pool', None)
- if pool is not None and (ignoremode or self.mode == 'read'):
+ cnxset = getattr(self._threaddata, 'cnxset', None)
+ if cnxset is not None and (ignoremode or self.mode == 'read'):
# even in read mode, we must release the current transaction
- self._free_thread_pool(threading.currentThread(), pool)
- del self._threaddata.pool
+ self._free_thread_cnxset(threading.currentThread(), cnxset)
+ del self._threaddata.cnxset
self._threaddata.ctx_count -= 1
def _touch(self):
@@ -781,9 +849,9 @@
rset.req = self
return rset
- def _clear_thread_data(self, reset_pool=True):
- """remove everything from the thread local storage, except pool
- which is explicitly removed by reset_pool, and mode which is set anyway
+ def _clear_thread_data(self, free_cnxset=True):
+ """remove everything from the thread local storage, except connections set
+ which is explicitly removed by free_cnxset, and mode which is set anyway
by _touch
"""
try:
@@ -791,8 +859,8 @@
except AttributeError:
pass
else:
- if reset_pool:
- self.reset_pool()
+ if free_cnxset:
+ self.free_cnxset()
if txstore.ctx_count == 0:
self._clear_thread_storage(txstore)
else:
@@ -816,9 +884,13 @@
except AttributeError:
continue
- def commit(self, reset_pool=True):
+ def commit(self, free_cnxset=True, reset_pool=None):
"""commit the current session's transaction"""
- if self.pool is None:
+ if reset_pool is not None:
+ warn('[3.13] use free_cnxset argument instead of reset_pool',
+ DeprecationWarning, stacklevel=2)
+ free_cnxset = reset_pool
+ if self.cnxset is None:
assert not self.pending_operations
self._clear_thread_data()
self._touch()
@@ -867,9 +939,9 @@
# XXX use slice notation since self.pending_operations is a
# read-only property.
self.pending_operations[:] = processed + self.pending_operations
- self.rollback(reset_pool)
+ self.rollback(free_cnxset)
raise
- self.pool.commit()
+ self.cnxset.commit()
self.commit_state = 'postcommit'
while self.pending_operations:
operation = self.pending_operations.pop(0)
@@ -883,15 +955,19 @@
return self.transaction_uuid(set=False)
finally:
self._touch()
- if reset_pool:
- self.reset_pool(ignoremode=True)
- self._clear_thread_data(reset_pool)
+ if free_cnxset:
+ self.free_cnxset(ignoremode=True)
+ self._clear_thread_data(free_cnxset)
- def rollback(self, reset_pool=True):
+ def rollback(self, free_cnxset=True, reset_pool=None):
"""rollback the current session's transaction"""
- # don't use self.pool, rollback may be called with _closed == True
- pool = getattr(self._threaddata, 'pool', None)
- if pool is None:
+ if reset_pool is not None:
+ warn('[3.13] use free_cnxset argument instead of reset_pool',
+ DeprecationWarning, stacklevel=2)
+ free_cnxset = reset_pool
+ # don't use self.cnxset, rollback may be called with _closed == True
+ cnxset = getattr(self._threaddata, 'cnxset', None)
+ if cnxset is None:
self._clear_thread_data()
self._touch()
self.debug('rollback session %s done (no db activity)', self.id)
@@ -906,20 +982,20 @@
except:
self.critical('rollback error', exc_info=sys.exc_info())
continue
- pool.rollback()
+ cnxset.rollback()
self.debug('rollback for session %s done', self.id)
finally:
self._touch()
- if reset_pool:
- self.reset_pool(ignoremode=True)
- self._clear_thread_data(reset_pool)
+ if free_cnxset:
+ self.free_cnxset(ignoremode=True)
+ self._clear_thread_data(free_cnxset)
def close(self):
- """do not close pool on session close, since they are shared now"""
+ """do not close connections set on session close, since they are shared now"""
with self._closed_lock:
self._closed = True
# copy since _threads_in_transaction maybe modified while waiting
- for thread, pool in self._threads_in_transaction.copy():
+ for thread, cnxset in self._threads_in_transaction.copy():
if thread is threading.currentThread():
continue
self.info('waiting for thread %s', thread)
@@ -929,12 +1005,12 @@
for i in xrange(10):
thread.join(1)
if not (thread.isAlive() and
- (thread, pool) in self._threads_in_transaction):
+ (thread, cnxset) in self._threads_in_transaction):
break
else:
self.error('thread %s still alive after 10 seconds, will close '
'session anyway', thread)
- self._free_thread_pool(thread, pool, force_close=True)
+ self._free_thread_cnxset(thread, cnxset, force_close=True)
self.rollback()
del self.__threaddata
del self._tx_data
@@ -970,8 +1046,7 @@
return self._threaddata.pruned_hooks_cache
def add_operation(self, operation, index=None):
- """add an observer"""
- assert self.commit_state != 'commit'
+ """add an operation"""
if index is None:
self.pending_operations.append(operation)
else:
@@ -1078,6 +1153,19 @@
# deprecated ###############################################################
+ @property
+ @deprecated("[3.13] use .cnxset attribute instead of .pool")
+ def pool(self):
+ return self.cnxset
+
+ @deprecated("[3.13] use .set_cnxset() method instead of .set_pool()")
+ def set_pool(self):
+ return self.set_cnxset()
+
+ @deprecated("[3.13] use .free_cnxset() method instead of .reset_pool()")
+ def reset_pool(self):
+ return self.free_cnxset()
+
@deprecated("[3.7] execute is now unsafe by default in hooks/operation. You"
" can also control security with the security_enabled context "
"manager")
@@ -1143,12 +1231,12 @@
self.disable_hook_categories('integrity')
@property
- def pool(self):
- """connections pool, set according to transaction mode for each query"""
+ def cnxset(self):
+ """connections set, set according to transaction mode for each query"""
if self.repo.shutting_down:
- self.reset_pool(True)
+ self.free_cnxset(True)
raise Exception('repository is shutting down')
- return getattr(self._threaddata, 'pool', None)
+ return getattr(self._threaddata, 'cnxset', None)
class InternalManager(object):
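Putting the new keyword and the compatibility shims together (all names are defined in this file; sketch):

    session.set_cnxset()
    session.commit(free_cnxset=False)   # keep the connections set after commit
    session.commit()                    # default free_cnxset=True: set is released

    # legacy spellings still work but emit a DeprecationWarning:
    session.commit(reset_pool=False)    # mapped onto free_cnxset=False
    session.set_pool()                  # -> session.set_cnxset()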
--- a/server/sources/__init__.py Thu May 19 10:36:26 2011 +0200
+++ b/server/sources/__init__.py Thu May 19 10:53:11 2011 +0200
@@ -230,23 +230,23 @@
def check_connection(self, cnx):
"""Check connection validity, return None if the connection is still
- valid else a new connection (called when the pool using the given
- connection is being attached to a session). Do nothing by default.
+ valid else a new connection (called when the connections set using the
+ given connection is being attached to a session). Do nothing by default.
"""
pass
- def close_pool_connections(self):
- for pool in self.repo.pools:
- pool._cursors.pop(self.uri, None)
- pool.source_cnxs[self.uri][1].close()
+ def close_source_connections(self):
+ for cnxset in self.repo.cnxsets:
+ cnxset._cursors.pop(self.uri, None)
+ cnxset.source_cnxs[self.uri][1].close()
- def open_pool_connections(self):
- for pool in self.repo.pools:
- pool.source_cnxs[self.uri] = (self, self.get_connection())
+ def open_source_connections(self):
+ for cnxset in self.repo.cnxsets:
+ cnxset.source_cnxs[self.uri] = (self, self.get_connection())
- def pool_reset(self, cnx):
- """the pool using the given connection is being reseted from its current
- attached session
+ def cnxset_freed(self, cnx):
+ """the connections set holding the given connection is being reseted
+ from its current attached session.
do nothing by default
"""
@@ -404,7 +404,7 @@
.executemany().
"""
res = self.syntax_tree_search(session, union, args, varmap=varmap)
- session.pool.source('system').manual_insert(res, table, session)
+ session.cnxset.source('system').manual_insert(res, table, session)
# write modification api ###################################################
# read-only sources don't have to implement methods below
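A backend source only needs to honour the two renamed callbacks to cooperate
with the session/cnxset lifecycle; a hypothetical source sketch (MySource and
its get_connection are stand-ins, not part of this patch):

    class MySource(object):
        """hypothetical source showing the renamed connection callbacks"""

        def get_connection(self):
            raise NotImplementedError('backend-specific')

        def check_connection(self, cnx):
            # return None while cnx is still valid, else a replacement
            # (called when the connections set using cnx is attached
            # to a session)
            if getattr(cnx, 'closed', False):
                return self.get_connection()
            return None

        def cnxset_freed(self, cnx):
            # the connections set holding cnx is being detached from its
            # session; release any per-session state here
            pass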
--- a/server/sources/datafeed.py Thu May 19 10:36:26 2011 +0200
+++ b/server/sources/datafeed.py Thu May 19 10:53:11 2011 +0200
@@ -254,17 +254,17 @@
try:
self.process_item(*args)
if partialcommit:
- # commit+set_pool instead of commit(reset_pool=False) to let
- # other a chance to get our pool
+ # commit + set_cnxset instead of commit(free_cnxset=False) to give
+ # others a chance to grab our connections set
self._cw.commit()
- self._cw.set_pool()
+ self._cw.set_cnxset()
except ValidationError, exc:
if raise_on_error:
raise
if partialcommit:
self.source.error('Skipping %s because of validation error %s' % (args, exc))
self._cw.rollback()
- self._cw.set_pool()
+ self._cw.set_cnxset()
error = True
else:
raise
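The commit + set_cnxset pair above frees the connections set after each item
and immediately re-acquires one, so concurrent sessions get a window to grab
it. The loop isolated as a sketch (session is assumed to expose
commit/rollback/set_cnxset as in this patch; error handling is simplified):

    def process_all(session, items, process_item, raise_on_error=False):
        """sketch of datafeed's partial-commit loop"""
        error = False
        for item in items:
            try:
                process_item(item)
                session.commit()      # frees the connections set...
                session.set_cnxset()  # ...then take one back for the next item
            except Exception:
                if raise_on_error:
                    raise
                session.rollback()
                session.set_cnxset()
                error = True
        return error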
--- a/server/sources/extlite.py Thu May 19 10:36:26 2011 +0200
+++ b/server/sources/extlite.py Thu May 19 10:53:11 2011 +0200
@@ -102,19 +102,19 @@
def backup(self, backupfile, confirm):
"""method called to create a backup of the source's data"""
- self.close_pool_connections()
+ self.close_source_connections()
try:
self.sqladapter.backup_to_file(backupfile, confirm)
finally:
- self.open_pool_connections()
+ self.open_source_connections()
def restore(self, backupfile, confirm, drop):
"""method called to restore a backup of source's data"""
- self.close_pool_connections()
+ self.close_source_connections()
try:
self.sqladapter.restore_from_file(backupfile, confirm, drop)
finally:
- self.open_pool_connections()
+ self.open_source_connections()
@property
def _sqlcnx(self):
@@ -174,15 +174,15 @@
def check_connection(self, cnx):
"""check connection validity, return None if the connection is still valid
- else a new connection (called when the pool using the given connection is
+ else a new connection (called when the connections set holding the given connection is
being attached to a session)
always return the connection to reset eventually cached cursor
"""
return cnx
- def pool_reset(self, cnx):
- """the pool using the given connection is being reseted from its current
+ def cnxset_freed(self, cnx):
+ """the connections set holding the given connection is being freed from its current
attached session: release the connection lock if the connection wrapper
has a connection set
"""
@@ -286,7 +286,7 @@
"""
if server.DEBUG:
print 'exec', query, args
- cursor = session.pool[self.uri]
+ cursor = session.cnxset[self.uri]
try:
# str(query) to avoid error if it's an unicode string
cursor.execute(str(query), args)
@@ -294,7 +294,7 @@
self.critical("sql: %r\n args: %s\ndbms message: %r",
query, args, ex.args[0])
try:
- session.pool.connection(self.uri).rollback()
+ session.cnxset.connection(self.uri).rollback()
self.critical('transaction has been rollbacked')
except:
pass
--- a/server/sources/ldapuser.py Thu May 19 10:36:26 2011 +0200
+++ b/server/sources/ldapuser.py Thu May 19 10:53:11 2011 +0200
@@ -524,9 +524,9 @@
"""make an ldap query"""
self.debug('ldap search %s %s %s %s %s', self.uri, base, scope,
searchstr, list(attrs))
- # XXX for now, we do not have connection pool support for LDAP, so
+ # XXX for now, we do not have connections set support for LDAP, so
# this is always self._conn
- cnx = session.pool.connection(self.uri).cnx
+ cnx = session.cnxset.connection(self.uri).cnx
try:
res = cnx.search_s(base, scope, searchstr, attrs)
except ldap.PARTIAL_RESULTS:
--- a/server/sources/native.py Thu May 19 10:36:26 2011 +0200
+++ b/server/sources/native.py Thu May 19 10:53:11 2011 +0200
@@ -304,9 +304,9 @@
self.dbhelper.dbname = abspath(self.dbhelper.dbname)
self.get_connection = lambda: ConnectionWrapper(self)
self.check_connection = lambda cnx: cnx
- def pool_reset(cnx):
+ def cnxset_freed(cnx):
cnx.close()
- self.pool_reset = pool_reset
+ self.cnxset_freed = cnxset_freed
if self.dbdriver == 'sqlite':
self._create_eid = None
self.create_eid = self._create_eid_sqlite
@@ -346,21 +346,21 @@
"""execute the query and return its result"""
return self.process_result(self.doexec(session, sql, args))
- def init_creating(self, pool=None):
+ def init_creating(self, cnxset=None):
# check full text index availibility
if self.do_fti:
- if pool is None:
- _pool = self.repo._get_pool()
- _pool.pool_set()
+ if cnxset is None:
+ _cnxset = self.repo._get_cnxset()
+ _cnxset.cnxset_set()
else:
- _pool = pool
- if not self.dbhelper.has_fti_table(_pool['system']):
+ _cnxset = cnxset
+ if not self.dbhelper.has_fti_table(_cnxset['system']):
if not self.repo.config.creating:
self.critical('no text index table')
self.do_fti = False
- if pool is None:
- _pool.pool_reset()
- self.repo._free_pool(_pool)
+ if cnxset is None:
+ _cnxset.cnxset_freed()
+ self.repo._free_cnxset(_cnxset)
def backup(self, backupfile, confirm, format='native'):
"""method called to create a backup of the source's data"""
@@ -368,25 +368,25 @@
self.repo.fill_schema()
self.set_schema(self.repo.schema)
helper = DatabaseIndependentBackupRestore(self)
- self.close_pool_connections()
+ self.close_source_connections()
try:
helper.backup(backupfile)
finally:
- self.open_pool_connections()
+ self.open_source_connections()
elif format == 'native':
- self.close_pool_connections()
+ self.close_source_connections()
try:
self.backup_to_file(backupfile, confirm)
finally:
- self.open_pool_connections()
+ self.open_source_connections()
else:
raise ValueError('Unknown format %r' % format)
def restore(self, backupfile, confirm, drop, format='native'):
"""method called to restore a backup of source's data"""
- if self.repo.config.open_connections_pools:
- self.close_pool_connections()
+ if self.repo.config.init_cnxset_pool:
+ self.close_source_connections()
try:
if format == 'portable':
helper = DatabaseIndependentBackupRestore(self)
@@ -396,12 +396,12 @@
else:
raise ValueError('Unknown format %r' % format)
finally:
- if self.repo.config.open_connections_pools:
- self.open_pool_connections()
+ if self.repo.config.init_cnxset_pool:
+ self.open_source_connections()
def init(self, activated, source_entity):
- self.init_creating(source_entity._cw.pool)
+ self.init_creating(source_entity._cw.cnxset)
def shutdown(self):
if self._eid_creation_cnx:
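Every backup/restore variant above wraps the real work in the same
close/reopen guard so no connections set keeps a handle on the database file
being dumped or replaced. The guard's shape, as a sketch over the two methods
renamed in this patch:

    def with_source_connections_closed(source, action):
        """sketch: close this source's connection in every cnxset, run the
        action, then reconnect them even if the action failed"""
        source.close_source_connections()
        try:
            return action()
        finally:
            source.open_source_connections()

Usage would look like
with_source_connections_closed(self, lambda: self.backup_to_file(backupfile, confirm)).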
@@ -523,13 +523,13 @@
raise
# FIXME: better detection of deconnection pb
self.warning("trying to reconnect")
- session.pool.reconnect(self)
+ session.cnxset.reconnect(self)
cursor = self.doexec(session, sql, args)
except (self.DbapiError,), exc:
# We get this one with pyodbc and SQL Server when connection was reset
if exc.args[0] == '08S01' and session.mode != 'write':
self.warning("trying to reconnect")
- session.pool.reconnect(self)
+ session.cnxset.reconnect(self)
cursor = self.doexec(session, sql, args)
else:
raise
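The except branches above retry a failed query exactly once after asking the
session's connections set to reconnect this source. The control flow in
isolation (OperationalError is a stand-in for the driver-specific
disconnection error, not a real import):

    class OperationalError(Exception):
        """stand-in for the driver-specific disconnection error"""

    def doexec_with_retry(session, source, run_query):
        """sketch of the single-retry logic: on a connection failure,
        reconnect this source inside the session's connections set and
        replay the query exactly once"""
        try:
            return run_query()
        except OperationalError:
            session.cnxset.reconnect(source)
            return run_query()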
@@ -718,9 +718,9 @@
"""Execute a query.
it's a function just so that it shows up in profiling
"""
- cursor = session.pool[self.uri]
+ cursor = session.cnxset[self.uri]
if server.DEBUG & server.DBG_SQL:
- cnx = session.pool.connection(self.uri)
+ cnx = session.cnxset.connection(self.uri)
# getattr to get the actual connection if cnx is a ConnectionWrapper
# instance
print 'exec', query, args, getattr(cnx, '_cnx', cnx)
@@ -735,7 +735,7 @@
query, args, ex.args[0])
if rollback:
try:
- session.pool.connection(self.uri).rollback()
+ session.cnxset.connection(self.uri).rollback()
if self.repo.config.mode != 'test':
self.critical('transaction has been rollbacked')
except:
@@ -764,7 +764,7 @@
"""
if server.DEBUG & server.DBG_SQL:
print 'execmany', query, 'with', len(args), 'arguments'
- cursor = session.pool[self.uri]
+ cursor = session.cnxset[self.uri]
try:
# str(query) to avoid error if it's an unicode string
cursor.executemany(str(query), args)
@@ -775,7 +775,7 @@
self.critical("sql many: %r\n args: %s\ndbms message: %r",
query, args, ex.args[0])
try:
- session.pool.connection(self.uri).rollback()
+ session.cnxset.connection(self.uri).rollback()
if self.repo.config.mode != 'test':
self.critical('transaction has been rollbacked')
except:
@@ -793,7 +793,7 @@
self.error("backend can't alter %s.%s to %s%s", table, column, coltype,
not allownull and 'NOT NULL' or '')
return
- self.dbhelper.change_col_type(LogCursor(session.pool[self.uri]),
+ self.dbhelper.change_col_type(LogCursor(session.cnxset[self.uri]),
table, column, coltype, allownull)
self.info('altered %s.%s: now %s%s', table, column, coltype,
not allownull and 'NOT NULL' or '')
@@ -808,7 +808,7 @@
return
table, column = rdef_table_column(rdef)
coltype, allownull = rdef_physical_info(self.dbhelper, rdef)
- self.dbhelper.set_null_allowed(LogCursor(session.pool[self.uri]),
+ self.dbhelper.set_null_allowed(LogCursor(session.cnxset[self.uri]),
table, column, coltype, allownull)
def update_rdef_indexed(self, session, rdef):
@@ -826,11 +826,11 @@
self.drop_index(session, table, column, unique=True)
def create_index(self, session, table, column, unique=False):
- cursor = LogCursor(session.pool[self.uri])
+ cursor = LogCursor(session.cnxset[self.uri])
self.dbhelper.create_index(cursor, table, column, unique)
def drop_index(self, session, table, column, unique=False):
- cursor = LogCursor(session.pool[self.uri])
+ cursor = LogCursor(session.cnxset[self.uri])
self.dbhelper.drop_index(cursor, table, column, unique)
# system source interface #################################################
@@ -841,7 +841,7 @@
try:
res = self.doexec(session, sql).fetchone()
except:
- assert session.pool, 'session has no pool set'
+ assert session.cnxset, 'session has no connections set'
raise UnknownEid(eid)
if res is None:
raise UnknownEid(eid)
@@ -1135,7 +1135,7 @@
important note: while undoing of a transaction, only hooks in the
'integrity', 'activeintegrity' and 'undo' categories are called.
"""
- # set mode so pool isn't released subsquently until commit/rollback
+ # set mode so the connections set isn't released until commit/rollback
session.mode = 'write'
errors = []
session.transaction_data['undoing_uuid'] = txuuid
@@ -1380,7 +1380,7 @@
def fti_unindex_entities(self, session, entities):
"""remove text content for entities from the full text index
"""
- cursor = session.pool['system']
+ cursor = session.cnxset['system']
cursor_unindex_object = self.dbhelper.cursor_unindex_object
try:
for entity in entities:
@@ -1393,7 +1393,7 @@
"""add text content of created/modified entities to the full text index
"""
cursor_index_object = self.dbhelper.cursor_index_object
- cursor = session.pool['system']
+ cursor = session.cnxset['system']
try:
# use cursor_index_object, not cursor_reindex_object since
# unindexing done in the FTIndexEntityOp
--- a/server/sources/pyrorql.py Thu May 19 10:36:26 2011 +0200
+++ b/server/sources/pyrorql.py Thu May 19 10:53:11 2011 +0200
@@ -237,7 +237,7 @@
return self.repo.extid2eid(self, str(extid), etype, session), True
if dexturi in self.repo.sources_by_uri:
source = self.repo.sources_by_uri[dexturi]
- cnx = session.pool.connection(source.uri)
+ cnx = session.cnxset.connection(source.uri)
eid = source.local_eid(cnx, dextid, session)[0]
return eid, False
return None, None
@@ -322,7 +322,7 @@
else a new connection
"""
# we have to transfer manually thread ownership. This can be done safely
- # since the pool to which belong the connection is affected to one
+ # since the connections set holding the connection is assigned to one
# session/thread and can't be called simultaneously
try:
cnx._repo._transferThread(threading.currentThread())
@@ -359,7 +359,7 @@
if not args is None:
args = args.copy()
# get cached cursor anyway
- cu = session.pool[self.uri]
+ cu = session.cnxset[self.uri]
if cu is None:
# this is a ConnectionWrapper instance
msg = session._("can't connect to source %s, some data may be missing")
@@ -390,7 +390,7 @@
or uidtype(union, i, etype, args)):
needtranslation.append(i)
if needtranslation:
- cnx = session.pool.connection(self.uri)
+ cnx = session.cnxset.connection(self.uri)
for rowindex in xrange(rset.rowcount - 1, -1, -1):
row = rows[rowindex]
localrow = False
@@ -434,21 +434,21 @@
def update_entity(self, session, entity):
"""update an entity in the source"""
relations, kwargs = self._entity_relations_and_kwargs(session, entity)
- cu = session.pool[self.uri]
+ cu = session.cnxset[self.uri]
cu.execute('SET %s WHERE X eid %%(x)s' % ','.join(relations), kwargs)
self._query_cache.clear()
entity.cw_clear_all_caches()
def delete_entity(self, session, entity):
"""delete an entity from the source"""
- cu = session.pool[self.uri]
+ cu = session.cnxset[self.uri]
cu.execute('DELETE %s X WHERE X eid %%(x)s' % entity.__regid__,
{'x': self.eid2extid(entity.eid, session)})
self._query_cache.clear()
def add_relation(self, session, subject, rtype, object):
"""add a relation to the source"""
- cu = session.pool[self.uri]
+ cu = session.cnxset[self.uri]
cu.execute('SET X %s Y WHERE X eid %%(x)s, Y eid %%(y)s' % rtype,
{'x': self.eid2extid(subject, session),
'y': self.eid2extid(object, session)})
@@ -458,7 +458,7 @@
def delete_relation(self, session, subject, rtype, object):
"""delete a relation from the source"""
- cu = session.pool[self.uri]
+ cu = session.cnxset[self.uri]
cu.execute('DELETE X %s Y WHERE X eid %%(x)s, Y eid %%(y)s' % rtype,
{'x': self.eid2extid(subject, session),
'y': self.eid2extid(object, session)})
--- a/server/sources/storages.py Thu May 19 10:36:26 2011 +0200
+++ b/server/sources/storages.py Thu May 19 10:53:11 2011 +0200
@@ -199,7 +199,7 @@
return fspath
def current_fs_path(self, entity, attr):
- sysource = entity._cw.pool.source('system')
+ sysource = entity._cw.cnxset.source('system')
cu = sysource.doexec(entity._cw,
'SELECT cw_%s FROM cw_%s WHERE cw_eid=%s' % (
attr, entity.__regid__, entity.eid))
--- a/server/test/unittest_hook.py Thu May 19 10:36:26 2011 +0200
+++ b/server/test/unittest_hook.py Thu May 19 10:53:11 2011 +0200
@@ -201,10 +201,10 @@
# self.assertEqual(self.called, [(1, 'concerne', 2), (3, 'concerne', 4)])
-# def _before_relation_hook(self, pool, subject, r_type, object):
+# def _before_relation_hook(self, cnxset, subject, r_type, object):
# self.called.append((subject, r_type, object))
-# def _after_relation_hook(self, pool, subject, r_type, object):
+# def _after_relation_hook(self, cnxset, subject, r_type, object):
# self.called.append((subject, r_type, object))
--- a/server/test/unittest_ldapuser.py Thu May 19 10:36:26 2011 +0200
+++ b/server/test/unittest_ldapuser.py Thu May 19 10:53:11 2011 +0200
@@ -137,7 +137,7 @@
def test_authenticate(self):
source = self.repo.sources_by_uri['ldapuser']
- self.session.set_pool()
+ self.session.set_cnxset()
self.assertRaises(AuthenticationError,
source.authenticate, self.session, 'toto', 'toto')
@@ -265,7 +265,7 @@
self.failUnless(self.sexecute('Any X,Y WHERE X login %(syt)s, Y login "cochon"', {'syt': SYT}))
def test_exists1(self):
- self.session.set_pool()
+ self.session.set_cnxset()
self.session.create_entity('CWGroup', name=u'bougloup1')
self.session.create_entity('CWGroup', name=u'bougloup2')
self.sexecute('SET U in_group G WHERE G name ~= "bougloup%", U login "admin"')
@@ -465,8 +465,8 @@
self._schema = repo.schema
super(RQL2LDAPFilterTC, self).setUp()
ldapsource = repo.sources[-1]
- self.pool = repo._get_pool()
- session = mock_object(pool=self.pool)
+ self.cnxset = repo._get_cnxset()
+ session = mock_object(cnxset=self.cnxset)
self.o = RQL2LDAPFilter(ldapsource, session)
self.ldapclasses = ''.join(ldapsource.base_filters)
--- a/server/test/unittest_migractions.py Thu May 19 10:36:26 2011 +0200
+++ b/server/test/unittest_migractions.py Thu May 19 10:53:11 2011 +0200
@@ -338,7 +338,7 @@
@tag('longrun')
def test_sync_schema_props_perms(self):
cursor = self.mh.session
- cursor.set_pool()
+ cursor.set_cnxset()
nbrqlexpr_start = cursor.execute('Any COUNT(X) WHERE X is RQLExpression')[0][0]
migrschema['titre'].rdefs[('Personne', 'String')].order = 7
migrschema['adel'].rdefs[('Personne', 'String')].order = 6
--- a/server/test/unittest_querier.py Thu May 19 10:36:26 2011 +0200
+++ b/server/test/unittest_querier.py Thu May 19 10:53:11 2011 +0200
@@ -1116,7 +1116,7 @@
#'INSERT Email X: X messageid "<1234>", X subject "test", X sender Y, X recipients Y'
eeid, = self.o.execute(s, 'INSERT Email X: X messageid "<1234>", X subject "test", X sender Y, X recipients Y WHERE Y is EmailAddress')[0]
self.o.execute(s, "DELETE Email X")
- sqlc = s.pool['system']
+ sqlc = s.cnxset['system']
sqlc.execute('SELECT * FROM recipients_relation')
self.assertEqual(len(sqlc.fetchall()), 0)
sqlc.execute('SELECT * FROM owned_by_relation WHERE eid_from=%s'%eeid)
@@ -1229,7 +1229,7 @@
self.assertEqual(rset.description, [('CWUser',)])
self.assertRaises(Unauthorized,
self.execute, "Any P WHERE X is CWUser, X login 'bob', X upassword P")
- cursor = self.pool['system']
+ cursor = self.cnxset['system']
cursor.execute("SELECT %supassword from %sCWUser WHERE %slogin='bob'"
% (SQL_PREFIX, SQL_PREFIX, SQL_PREFIX))
passwd = str(cursor.fetchone()[0])
@@ -1244,7 +1244,7 @@
self.assertEqual(rset.description[0][0], 'CWUser')
rset = self.execute("SET X upassword %(pwd)s WHERE X is CWUser, X login 'bob'",
{'pwd': 'tutu'})
- cursor = self.pool['system']
+ cursor = self.cnxset['system']
cursor.execute("SELECT %supassword from %sCWUser WHERE %slogin='bob'"
% (SQL_PREFIX, SQL_PREFIX, SQL_PREFIX))
passwd = str(cursor.fetchone()[0])
--- a/server/test/unittest_repository.py Thu May 19 10:36:26 2011 +0200
+++ b/server/test/unittest_repository.py Thu May 19 10:53:11 2011 +0200
@@ -63,7 +63,7 @@
table = SQL_PREFIX + 'CWEType'
namecol = SQL_PREFIX + 'name'
finalcol = SQL_PREFIX + 'final'
- self.session.set_pool()
+ self.session.set_cnxset()
cu = self.session.system_sql('SELECT %s FROM %s WHERE %s is NULL' % (
namecol, table, finalcol))
self.assertEqual(cu.fetchall(), [])
@@ -260,7 +260,7 @@
cnxid = repo.connect(self.admlogin, password=self.admpassword)
# rollback state change which trigger TrInfo insertion
session = repo._get_session(cnxid)
- session.set_pool()
+ session.set_cnxset()
user = session.user
user.cw_adapt_to('IWorkflowable').fire_transition('deactivate')
rset = repo.execute(cnxid, 'TrInfo T WHERE T wf_info_for X, X eid %(x)s', {'x': user.eid})
@@ -293,7 +293,7 @@
try:
with self.assertRaises(Exception) as cm:
run_transaction()
- self.assertEqual(str(cm.exception), 'try to access pool on a closed session')
+ self.assertEqual(str(cm.exception), 'try to access connections set on a closed session')
finally:
t.join()
@@ -383,7 +383,7 @@
def test_internal_api(self):
repo = self.repo
cnxid = repo.connect(self.admlogin, password=self.admpassword)
- session = repo._get_session(cnxid, setpool=True)
+ session = repo._get_session(cnxid, setcnxset=True)
self.assertEqual(repo.type_and_source_from_eid(2, session),
('CWGroup', 'system', None))
self.assertEqual(repo.type_from_eid(2, session), 'CWGroup')
@@ -520,31 +520,31 @@
class DataHelpersTC(CubicWebTC):
def test_create_eid(self):
- self.session.set_pool()
+ self.session.set_cnxset()
self.assert_(self.repo.system_source.create_eid(self.session))
def test_source_from_eid(self):
- self.session.set_pool()
+ self.session.set_cnxset()
self.assertEqual(self.repo.source_from_eid(1, self.session),
self.repo.sources_by_uri['system'])
def test_source_from_eid_raise(self):
- self.session.set_pool()
+ self.session.set_cnxset()
self.assertRaises(UnknownEid, self.repo.source_from_eid, -2, self.session)
def test_type_from_eid(self):
- self.session.set_pool()
+ self.session.set_cnxset()
self.assertEqual(self.repo.type_from_eid(2, self.session), 'CWGroup')
def test_type_from_eid_raise(self):
- self.session.set_pool()
+ self.session.set_cnxset()
self.assertRaises(UnknownEid, self.repo.type_from_eid, -2, self.session)
def test_add_delete_info(self):
entity = self.repo.vreg['etypes'].etype_class('Personne')(self.session)
entity.eid = -1
entity.complete = lambda x: None
- self.session.set_pool()
+ self.session.set_cnxset()
self.repo.add_info(self.session, entity, self.repo.system_source)
cu = self.session.system_sql('SELECT * FROM entities WHERE eid = -1')
data = cu.fetchall()
@@ -567,7 +567,7 @@
self.commit()
ts = datetime.now()
self.assertEqual(len(self.execute('Personne X WHERE X has_text "tutu"')), 1)
- self.session.set_pool()
+ self.session.set_cnxset()
cu = self.session.system_sql('SELECT mtime, eid FROM entities WHERE eid = %s' % eidp)
omtime = cu.fetchone()[0]
# our sqlite datetime adapter is ignore seconds fraction, so we have to
@@ -576,7 +576,7 @@
self.execute('SET X nom "tata" WHERE X eid %(x)s', {'x': eidp})
self.commit()
self.assertEqual(len(self.execute('Personne X WHERE X has_text "tutu"')), 1)
- self.session.set_pool()
+ self.session.set_cnxset()
cu = self.session.system_sql('SELECT mtime FROM entities WHERE eid = %s' % eidp)
mtime = cu.fetchone()[0]
self.failUnless(omtime < mtime)
@@ -647,7 +647,7 @@
CubicWebTC.setUp(self)
CALLED[:] = ()
- def _after_relation_hook(self, pool, fromeid, rtype, toeid):
+ def _after_relation_hook(self, cnxset, fromeid, rtype, toeid):
self.called.append((fromeid, rtype, toeid))
def test_inline_relation(self):
--- a/server/test/unittest_security.py Thu May 19 10:36:26 2011 +0200
+++ b/server/test/unittest_security.py Thu May 19 10:53:11 2011 +0200
@@ -221,7 +221,7 @@
rset = cu.execute('Personne P')
self.assertEqual(len(rset), 1)
ent = rset.get_entity(0, 0)
- session.set_pool() # necessary
+ session.set_cnxset() # necessary
self.assertRaises(Unauthorized, ent.cw_check_perm, 'update')
self.assertRaises(Unauthorized,
cu.execute, "SET P travaille S WHERE P is Personne, S is Societe")
@@ -579,7 +579,7 @@
cnx = self.login('iaminusersgrouponly')
session = self.session
# needed to avoid check_perm error
- session.set_pool()
+ session.set_cnxset()
# needed to remove rql expr granting update perm to the user
affaire_perms = self.schema['Affaire'].permissions.copy()
self.schema['Affaire'].set_action_permissions('update', self.schema['Affaire'].get_groups('update'))
--- a/server/test/unittest_session.py Thu May 19 10:36:26 2011 +0200
+++ b/server/test/unittest_session.py Thu May 19 10:53:11 2011 +0200
@@ -74,9 +74,9 @@
self.assertEqual(session.disabled_hook_categories, set())
self.assertEqual(session.enabled_hook_categories, set(('metadata',)))
# leaving context manager with no transaction running should reset the
- # transaction local storage (and associated pool)
+ # transaction local storage (and associated cnxset)
self.assertEqual(session._tx_data, {})
- self.assertEqual(session.pool, None)
+ self.assertEqual(session.cnxset, None)
if __name__ == '__main__':
unittest_main()
--- a/server/test/unittest_undo.py Thu May 19 10:36:26 2011 +0200
+++ b/server/test/unittest_undo.py Thu May 19 10:53:11 2011 +0200
@@ -232,7 +232,7 @@
self.failIf(self.execute('Any X WHERE X eid %(x)s', {'x': c.eid}))
self.failIf(self.execute('Any X WHERE X eid %(x)s', {'x': p.eid}))
self.failIf(self.execute('Any X,Y WHERE X fiche Y'))
- self.session.set_pool()
+ self.session.set_cnxset()
for eid in (p.eid, c.eid):
self.failIf(session.system_sql(
'SELECT * FROM entities WHERE eid=%s' % eid).fetchall())