--- a/.hgtags Fri Jul 02 10:29:32 2010 +0200
+++ b/.hgtags Fri Jul 02 11:52:51 2010 +0200
@@ -133,3 +133,5 @@
b7883287f40c853e8278edc3f24326f2c9549954 cubicweb-debian-version-3.8.4-1
2de32c0c293ba451b231efe77d6027376af3a2a3 cubicweb-version-3.8.5
5d05b08adeab1ea301e49ed8537e35ede6db92f6 cubicweb-debian-version-3.8.5-1
+1a24c62aefc5e57f61be3d04affd415288e81904 cubicweb-version-3.8.6
+607a90073911b6bb941a49b5ec0b0d2a9cd479af cubicweb-debian-version-3.8.6-1
--- a/dbapi.py Fri Jul 02 10:29:32 2010 +0200
+++ b/dbapi.py Fri Jul 02 11:52:51 2010 +0200
@@ -440,7 +440,7 @@
DeprecationWarning, stacklevel=2)
# XXX use named argument for build_descr in case repo is < 3.8
rset = self._repo.execute(self._sessid, rql, args,
- build_descr=build_descr, txid=self._txid())
+ build_descr=build_descr, **self._txid())
rset.req = self.req
return rset
@@ -479,6 +479,10 @@
if cnxprops and cnxprops.log_queries:
self.executed_queries = []
self.cursor_class = LogCursor
+ if self._cnxtype == 'pyro':
+ # check client/server compat
+ if self._repo.get_versions()['cubicweb'] < (3, 8, 6):
+ self._txid = lambda cursor=None: {}
def __repr__(self):
if self.anonymous_connection:
@@ -496,7 +500,8 @@
return False #propagate the exception
def _txid(self, cursor=None): # XXX could now handle various isolation level!
- return currentThread().getName()
+ # return a dict as bw compat trick
+ return {'txid': currentThread().getName()}
def request(self):
return DBAPIRequest(self.vreg, DBAPISession(self))
@@ -634,7 +639,7 @@
def describe(self, eid):
if self._closed is not None:
raise ProgrammingError('Closed connection')
- return self._repo.describe(self.sessionid, eid, txid=self._txid())
+ return self._repo.describe(self.sessionid, eid, **self._txid())
def close(self):
"""Close the connection now (rather than whenever __del__ is called).
@@ -647,7 +652,7 @@
"""
if self._closed:
raise ProgrammingError('Connection is already closed')
- self._repo.close(self.sessionid, txid=self._txid())
+ self._repo.close(self.sessionid, **self._txid())
del self._repo # necessary for proper garbage collection
self._closed = 1
@@ -661,7 +666,7 @@
"""
if not self._closed is None:
raise ProgrammingError('Connection is already closed')
- return self._repo.commit(self.sessionid, txid=self._txid())
+ return self._repo.commit(self.sessionid, **self._txid())
def rollback(self):
"""This method is optional since not all databases provide transaction
@@ -674,7 +679,7 @@
"""
if not self._closed is None:
raise ProgrammingError('Connection is already closed')
- self._repo.rollback(self.sessionid, txid=self._txid())
+ self._repo.rollback(self.sessionid, **self._txid())
def cursor(self, req=None):
"""Return a new Cursor Object using the connection.
@@ -714,8 +719,8 @@
only searched in 'public' actions, unless a `public` argument is given
and set to false.
"""
+ actionfilters.update(self._txid())
txinfos = self._repo.undoable_transactions(self.sessionid, ueid,
- txid=self._txid(),
**actionfilters)
if req is None:
req = self.request()
@@ -731,7 +736,7 @@
him).
"""
txinfo = self._repo.transaction_info(self.sessionid, txuuid,
- txid=self._txid())
+ **self._txid())
if req is None:
req = self.request()
txinfo.req = req
@@ -748,7 +753,7 @@
transaction doesn't belong to him).
"""
return self._repo.transaction_actions(self.sessionid, txuuid, public,
- txid=self._txid())
+ **self._txid())
def undo_transaction(self, txuuid):
"""Undo the given transaction. Return potential restoration errors.
@@ -758,4 +763,4 @@
him).
"""
return self._repo.undo_transaction(self.sessionid, txuuid,
- txid=self._txid())
+ **self._txid())
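
Note on the **self._txid() expansion used throughout dbapi.py: _txid() now returns a mapping rather than a bare string, and when the pyro-connected repository is older than 3.8.6 it is replaced by a lambda returning an empty dict (see the compat check added to __init__), so the txid keyword is simply never sent to servers that would not accept it. A minimal sketch of the mechanism, where execute is only a simplified stand-in for the repository call:

    from threading import currentThread

    def _txid(cursor=None):
        # new behaviour: transaction id wrapped in a dict for ** expansion
        return {'txid': currentThread().getName()}

    def _legacy_txid(cursor=None):
        # installed when the remote cubicweb is < 3.8.6
        return {}

    def execute(sessionid, rql, args=None, build_descr=True, txid=None):
        # simplified stand-in, not the actual Repository.execute signature
        return txid

    execute('sess', 'Any X', **_txid())         # txid is the current thread name
    execute('sess', 'Any X', **_legacy_txid())  # txid stays None, keyword not sent
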
--- a/debian/changelog Fri Jul 02 10:29:32 2010 +0200
+++ b/debian/changelog Fri Jul 02 11:52:51 2010 +0200
@@ -1,3 +1,9 @@
+cubicweb (3.8.6-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Sylvain Thénault <sylvain.thenault@logilab.fr> Fri, 02 Jul 2010 00:39:36 +0200
+
cubicweb (3.8.5-1) unstable; urgency=low
* new upstream release
--- a/server/checkintegrity.py Fri Jul 02 10:29:32 2010 +0200
+++ b/server/checkintegrity.py Fri Jul 02 11:52:51 2010 +0200
@@ -88,7 +88,7 @@
else:
yield eschema
-def reindex_entities(schema, session, withpb=True):
+def reindex_entities(schema, session, withpb=True, etypes=None):
"""reindex all entities in the repository"""
# deactivate modification_date hook since we don't want them
# to be updated due to the reindexation
@@ -98,15 +98,16 @@
print 'no text index table'
repo.system_source.dbhelper.init_fti(cursor)
repo.system_source.do_fti = True # ensure full-text indexation is activated
- etypes = set()
- for eschema in schema.entities():
- if eschema.final:
- continue
- indexable_attrs = tuple(eschema.indexable_attributes()) # generator
- if not indexable_attrs:
- continue
- for container in etype_fti_containers(eschema):
- etypes.add(container)
+ if etypes is None:
+ etypes = set()
+ for eschema in schema.entities():
+ if eschema.final:
+ continue
+ indexable_attrs = tuple(eschema.indexable_attributes()) # generator
+ if not indexable_attrs:
+ continue
+ for container in etype_fti_containers(eschema):
+ etypes.add(container)
print 'Reindexing entities of type %s' % \
', '.join(sorted(str(e) for e in etypes))
if withpb:
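
Note: reindex_entities() keeps its previous behaviour when etypes is None and now also accepts an explicit collection of entity types, which the new cmd_reindex_entities migration command below passes straight through. A hedged usage sketch, where repo and session stand for an already opened repository and session and the entity type names are only examples:

    from cubicweb.server.checkintegrity import reindex_entities

    # rebuild the full-text index for every indexable type (previous behaviour)
    reindex_entities(repo.schema, session)

    # restrict the rebuild to a couple of types, without the progress bar
    reindex_entities(repo.schema, session, withpb=False, etypes=('Card', 'File'))

From a migration script or a cubicweb-ctl shell, the same thing should be reachable as reindex_entities(etypes=...), assuming the command is exposed without its cmd_ prefix like the other cmd_* helpers in migractions.py.
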
--- a/server/migractions.py Fri Jul 02 10:29:32 2010 +0200
+++ b/server/migractions.py Fri Jul 02 11:52:51 2010 +0200
@@ -25,9 +25,8 @@
The following data actions are supported for now:
* add an entity
* execute raw RQL queries
-
+"""
-"""
from __future__ import with_statement
__docformat__ = "restructuredtext en"
@@ -1231,6 +1230,13 @@
self.commit()
return entity
+ def cmd_reindex_entities(self, etypes=None):
+        """force reindexation of entities of the given types or of all
+ indexable entity types
+ """
+ from cubicweb.server.checkintegrity import reindex_entities
+ reindex_entities(self.repo.schema, self.session, etypes=etypes)
+
@deprecated('[3.5] use create_entity', stacklevel=3)
def cmd_add_entity(self, etype, *args, **kwargs):
"""add a new entity of the given type"""
--- a/server/sources/ldapuser.py Fri Jul 02 10:29:32 2010 +0200
+++ b/server/sources/ldapuser.py Fri Jul 02 11:52:51 2010 +0200
@@ -19,8 +19,6 @@
this source is for now limited to a read-only CWUser source
-
-
 Part of the code is coming from Zope's LDAPUserFolder
Copyright (c) 2004 Jens Vagelpohl.
@@ -278,7 +276,10 @@
to fetch the salt first
"""
self.info('ldap authenticate %s', login)
- if password is None:
+ if not password:
+ # On Windows + ADAM this would have succeeded (!!!)
+ # You get Authenticated as: 'NT AUTHORITY\ANONYMOUS LOGON'.
+ # we really really don't want that
raise AuthenticationError()
searchfilter = [filter_format('(%s=%s)', (self.user_login_attr, login))]
searchfilter.extend([filter_format('(%s=%s)', ('objectClass', o))
@@ -293,16 +294,13 @@
raise AuthenticationError()
# check password by establishing a (unused) connection
try:
- if password:
- self._connect(user, password)
- else:
- # On Windows + ADAM this would have succeeded (!!!)
- # You get Authenticated as: 'NT AUTHORITY\ANONYMOUS LOGON'.
- # we really really don't want that
- raise Exception('No password provided')
- except Exception, ex:
+ self._connect(user, password)
+ except ldap.LDAPError, ex:
+ # Something went wrong, most likely bad credentials
self.info('while trying to authenticate %s: %s', user, ex)
- # Something went wrong, most likely bad credentials
+ raise AuthenticationError()
+ except Exception:
+ self.error('while trying to authenticate %s', user, exc_info=True)
raise AuthenticationError()
return self.extid2eid(user['dn'], 'CWUser', session)
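
Note: two behaviours change in this source. An empty password is now rejected up front (if not password:) rather than only a None one, closing the Windows/ADAM anonymous-bind hole mentioned in the comment, and only ldap.LDAPError is treated as an expected credential failure; any other exception is logged with its traceback while the caller still only sees an AuthenticationError. A stripped-down sketch of that split, using a stand-in exception class and logger instead of cubicweb's own:

    import logging
    import ldap

    class AuthenticationError(Exception):
        """stand-in for cubicweb's AuthenticationError"""

    def check_credentials(connect, user, password, logger=logging.getLogger('ldap')):
        if not password:
            # never let an empty password turn into an anonymous bind
            raise AuthenticationError()
        try:
            connect(user, password)
        except ldap.LDAPError, ex:
            # expected failure, most likely bad credentials: log quietly
            logger.info('while trying to authenticate %s: %s', user, ex)
            raise AuthenticationError()
        except Exception:
            # unexpected failure: keep the traceback, but do not leak details
            logger.error('while trying to authenticate %s', user, exc_info=True)
            raise AuthenticationError()
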
--- a/server/sources/storages.py Fri Jul 02 10:29:32 2010 +0200
+++ b/server/sources/storages.py Fri Jul 02 11:52:51 2010 +0200
@@ -106,7 +106,7 @@
"""
fpath = source.binary_to_str(value)
try:
- return Binary(file(fpath).read())
+ return Binary(file(fpath, 'rb').read())
except OSError, ex:
source.critical("can't open %s: %s", value, ex)
return None
@@ -114,28 +114,50 @@
def entity_added(self, entity, attr):
"""an entity using this storage for attr has been added"""
if entity._cw.transaction_data.get('fs_importing'):
- binary = Binary(file(entity[attr].getvalue()).read())
+ binary = Binary(file(entity[attr].getvalue(), 'rb').read())
else:
binary = entity.pop(attr)
fpath = self.new_fs_path(entity, attr)
# bytes storage used to store file's path
entity[attr] = Binary(fpath)
- file(fpath, 'w').write(binary.getvalue())
+ file(fpath, 'wb').write(binary.getvalue())
hook.set_operation(entity._cw, 'bfss_added', fpath, AddFileOp)
return binary
def entity_updated(self, entity, attr):
"""an entity using this storage for attr has been updatded"""
+ # get the name of the previous file containing the value
oldpath = self.current_fs_path(entity, attr)
if entity._cw.transaction_data.get('fs_importing'):
+ # If we are importing from the filesystem, the file already exists.
+ # We do not need to create it but we need to fetch the content of
+ # the file as the actual content of the attribute
fpath = entity[attr].getvalue()
- binary = Binary(file(fpath).read())
+ binary = Binary(file(fpath, 'rb').read())
else:
+            # We must store the content of the attribute in a file to stay
+            # consistent with the behaviour of entity_added.
+            # Moreover, the BytesFileSystemStorage expects to be able to
+            # retrieve the current value of the attribute at any time by
+            # reading the file on disk. To be able to roll back, use a new
+            # file and keep the old one; the old one will be removed on
+            # commit if everything went fine.
+ #
+ # fetch the current attribute value in memory
binary = entity.pop(attr)
+ # Get filename for it
fpath = self.new_fs_path(entity, attr)
- UpdateFileOp(entity._cw, filepath=fpath, filedata=binary.getvalue())
+ assert not osp.exists(fpath)
+ # write attribute value on disk
+ file(fpath, 'wb').write(binary.getvalue())
+ # Mark the new file as added during the transaction.
+ # The file will be removed on rollback
+ hook.set_operation(entity._cw, 'bfss_added', fpath, AddFileOp)
if oldpath != fpath:
+ # register the new location for the file.
entity[attr] = Binary(fpath)
+ # Mark the old file as useless so the file will be removed at
+ # commit.
hook.set_operation(entity._cw, 'bfss_deleted', oldpath,
DeleteFileOp)
return binary
@@ -200,10 +222,3 @@
unlink(filepath)
except Exception, ex:
self.error('cant remove %s: %s' % (filepath, ex))
-
-class UpdateFileOp(hook.Operation):
- def precommit_event(self):
- try:
- file(self.filepath, 'w').write(self.filedata)
- except Exception, ex:
- self.exception(str(ex))
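
Note: the UpdateFileOp removed above deferred writing the file until precommit, so the current attribute value could not be read back from disk during the transaction. entity_updated() now writes the new content to a fresh path immediately and reuses the bfss_added / bfss_deleted bookkeeping, so a rollback drops the new file and a commit drops the old one. A hedged sketch of that copy-on-write pattern outside of the cubicweb hook machinery, with on_commit / on_rollback standing in for the operation queues:

    import os

    def update_stored_value(oldpath, newpath, data, on_commit, on_rollback):
        # the new value goes to a file that did not exist before
        assert not os.path.exists(newpath)
        f = open(newpath, 'wb')
        try:
            f.write(data)
        finally:
            f.close()
        # if the transaction is rolled back, drop the freshly written file
        on_rollback.append(lambda: os.unlink(newpath))
        if oldpath != newpath:
            # once the transaction commits, the previous file becomes garbage
            on_commit.append(lambda: os.unlink(oldpath))
        return newpath
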