--- a/debian/control Sat Jun 11 09:22:25 2011 +0200
+++ b/debian/control Wed Jun 15 17:16:19 2011 +0200
@@ -8,6 +8,8 @@
Aurélien Campéas <aurelien.campeas@logilab.fr>,
Nicolas Chauvat <nicolas.chauvat@logilab.fr>
Build-Depends: debhelper (>= 7), python (>= 2.5), python-central (>= 0.5)
+# for the documentation:
+# python-sphinx, python-logilab-common, python-unittest2,
Standards-Version: 3.9.1
Homepage: http://www.cubicweb.org
XS-Python-Version: >= 2.5, << 2.7
@@ -133,6 +135,7 @@
Package: cubicweb-documentation
+Architecture: all
Recommends: doc-base
Description: documentation for the CubicWeb framework
CubicWeb is a semantic web application framework.
--- a/debian/rules Sat Jun 11 09:22:25 2011 +0200
+++ b/debian/rules Wed Jun 15 17:16:19 2011 +0200
@@ -15,6 +15,9 @@
# cd doc && make
# FIXME cleanup and use sphinx-build as build-depends ?
NO_SETUPTOOLS=1 python setup.py build
+ # XXX uncomment this and associated build-depends in control
+ #when the necessary sphinx version is in all built distributions
+ #PYTHONPATH=$(CURDIR)/.. $(MAKE) -C doc/book/en all
touch build-stamp
clean:
--- a/devtools/__init__.py Sat Jun 11 09:22:25 2011 +0200
+++ b/devtools/__init__.py Wed Jun 15 17:16:19 2011 +0200
@@ -601,15 +601,18 @@
def _backup_database(self, db_id):
"""Actual backup the current database.
- return a value to be stored in db_cache to allow restoration"""
+ return a value to be stored in db_cache to allow restoration
+ """
from cubicweb.server.serverctl import createdb
orig_name = self.system_source['db-name']
try:
backup_name = self._backup_name(db_id)
self._drop(backup_name)
self.system_source['db-name'] = backup_name
+ self._repo.turn_repo_off()
createdb(self.helper, self.system_source, self.dbcnx, self.cursor, template=orig_name)
self.dbcnx.commit()
+ self._repo.turn_repo_on()
return backup_name
finally:
self.system_source['db-name'] = orig_name
--- a/doc/tools/pyjsrest.py Sat Jun 11 09:22:25 2011 +0200
+++ b/doc/tools/pyjsrest.py Wed Jun 15 17:16:19 2011 +0200
@@ -127,8 +127,6 @@
'cubicweb.htmlhelpers',
'cubicweb.ajax',
- 'cubicweb.lazy',
- 'cubicweb.tabs',
'cubicweb.ajax.box',
'cubicweb.facets',
'cubicweb.widgets',
--- a/entity.py Sat Jun 11 09:22:25 2011 +0200
+++ b/entity.py Wed Jun 15 17:16:19 2011 +0200
@@ -108,7 +108,7 @@
# class attributes that must be set in class definition
rest_attr = None
fetch_attrs = None
- skip_copy_for = ('in_state',)
+ skip_copy_for = ('in_state',) # XXX turn into a set
# class attributes set automatically at registration time
e_schema = None
--- a/hooks/integrity.py Sat Jun 11 09:22:25 2011 +0200
+++ b/hooks/integrity.py Wed Jun 15 17:16:19 2011 +0200
@@ -121,10 +121,10 @@
return
session = self._cw
eidfrom, eidto = self.eidfrom, self.eidto
- pendingrdefs = session.transaction_data.get('pendingrdefs', ())
- if (session.describe(eidfrom)[0], rtype, session.describe(eidto)[0]) in pendingrdefs:
+ rdef = session.rtype_eids_rdef(rtype, eidfrom, eidto)
+ if (rdef.subject, rtype, rdef.object) in session.transaction_data.get('pendingrdefs', ()):
return
- card = session.schema_rproperty(rtype, eidfrom, eidto, 'cardinality')
+ card = rdef.cardinality
if card[0] in '1+' and not session.deleted_in_transaction(eidfrom):
_CheckSRelationOp.get_instance(session).add_data((eidfrom, rtype))
if card[1] in '1+' and not session.deleted_in_transaction(eidto):
@@ -190,8 +190,8 @@
def __call__(self):
# XXX get only RQL[Unique]Constraints?
- constraints = self._cw.schema_rproperty(self.rtype, self.eidfrom, self.eidto,
- 'constraints')
+ rdef = self._cw.rtype_eids_rdef(self.rtype, self.eidfrom, self.eidto)
+ constraints = rdef.constraints
if constraints:
_CheckConstraintsOp.get_instance(self._cw).add_data(
(self.eidfrom, self.rtype, self.eidto, constraints))
@@ -341,15 +341,15 @@
def __call__(self):
# if the relation is being delete, don't delete composite's components
# automatically
- pendingrdefs = self._cw.transaction_data.get('pendingrdefs', ())
- if (self._cw.describe(self.eidfrom)[0], self.rtype,
- self._cw.describe(self.eidto)[0]) in pendingrdefs:
+ session = self._cw
+ rtype = self.rtype
+ rdef = session.rtype_eids_rdef(rtype, self.eidfrom, self.eidto)
+ if (rdef.subject, rtype, rdef.object) in session.transaction_data.get('pendingrdefs', ()):
return
- composite = self._cw.schema_rproperty(self.rtype, self.eidfrom, self.eidto,
- 'composite')
+ composite = rdef.composite
if composite == 'subject':
_DelayedDeleteOEntityOp.get_instance(self._cw).add_data(
- (self.eidto, self.rtype))
+ (self.eidto, rtype))
elif composite == 'object':
_DelayedDeleteSEntityOp.get_instance(self._cw).add_data(
- (self.eidfrom, self.rtype))
+ (self.eidfrom, rtype))
--- a/hooks/metadata.py Sat Jun 11 09:22:25 2011 +0200
+++ b/hooks/metadata.py Wed Jun 15 17:16:19 2011 +0200
@@ -108,7 +108,7 @@
# skip this special composite relation # XXX (syt) why?
return
eidfrom, eidto = self.eidfrom, self.eidto
- composite = self._cw.schema_rproperty(self.rtype, eidfrom, eidto, 'composite')
+ composite = self._cw.rtype_eids_rdef(self.rtype, eidfrom, eidto).composite
if composite == 'subject':
SyncOwnersOp.get_instance(self._cw).add_data( (eidfrom, eidto) )
elif composite == 'object':
--- a/hooks/syncschema.py Sat Jun 11 09:22:25 2011 +0200
+++ b/hooks/syncschema.py Wed Jun 15 17:16:19 2011 +0200
@@ -577,7 +577,8 @@
elif lastrel:
DropRelationTable(session, str(rschema))
# then update the in-memory schema
- rschema.del_relation_def(rdef.subject, rdef.object)
+ if rdef.subject not in ETYPE_NAME_MAP and rdef.object not in ETYPE_NAME_MAP:
+ rschema.del_relation_def(rdef.subject, rdef.object)
# if this is the last relation definition of this type, drop associated
# relation type
if lastrel and not session.deleted_in_transaction(rschema.eid):
@@ -588,8 +589,10 @@
#
# Note: add_relation_def takes a RelationDefinition, not a
# RelationDefinitionSchema, needs to fake it
- self.rdef.name = str(self.rdef.rtype)
- self.session.vreg.schema.add_relation_def(self.rdef)
+ rdef = self.rdef
+ rdef.name = str(rdef.rtype)
+ if rdef.subject not in ETYPE_NAME_MAP and rdef.object not in ETYPE_NAME_MAP:
+ self.session.vreg.schema.add_relation_def(rdef)
@@ -885,7 +888,7 @@
if name in CORE_TYPES:
raise ValidationError(self.entity.eid, {None: self._cw._('can\'t be deleted')})
# delete every entities of this type
- if not name in ETYPE_NAME_MAP:
+ if name not in ETYPE_NAME_MAP:
self._cw.execute('DELETE %s X' % name)
MemSchemaCWETypeDel(self._cw, etype=name)
DropTable(self._cw, table=SQL_PREFIX + name)
@@ -1069,6 +1072,8 @@
return
subjtype = entity.stype.name
objtype = entity.otype.name
+ if subjtype in ETYPE_NAME_MAP or objtype in ETYPE_NAME_MAP:
+ return
rschema = self._cw.vreg.schema[entity.rtype.name]
# note: do not access schema rdef here, it may be added later by an
# operation
--- a/misc/migration/3.10.0_Any.py Sat Jun 11 09:22:25 2011 +0200
+++ b/misc/migration/3.10.0_Any.py Wed Jun 15 17:16:19 2011 +0200
@@ -32,7 +32,7 @@
# rename cwprops for boxes/contentnavigation
for x in rql('Any X,XK WHERE X pkey XK, '
- 'X pkey ~= "boxes.%s" OR '
- 'X pkey ~= "contentnavigation.%s"').entities():
+ 'X pkey ~= "boxes.%" OR '
+ 'X pkey ~= "contentnavigation.%"').entities():
x.set_attributes(pkey=u'ctxcomponents.' + x.pkey.split('.', 1)[1])
--- a/server/hook.py Sat Jun 11 09:22:25 2011 +0200
+++ b/server/hook.py Wed Jun 15 17:16:19 2011 +0200
@@ -928,6 +928,13 @@
def _build_container(self):
return self.containercls()
+ def union(self, data):
+ """only when container is a set"""
+ assert not self._processed, """Trying to add data to a closed operation.
+Iterating over operation data closed it and should be reserved to precommit /
+postcommit method of the operation."""
+ self._container |= data
+
def add_data(self, data):
assert not self._processed, """Trying to add data to a closed operation.
Iterating over operation data closed it and should be reserved to precommit /
--- a/server/migractions.py Sat Jun 11 09:22:25 2011 +0200
+++ b/server/migractions.py Wed Jun 15 17:16:19 2011 +0200
@@ -946,23 +946,19 @@
# triggered by schema synchronization hooks.
session = self.session
for rdeftype in ('CWRelation', 'CWAttribute'):
- thispending = set()
- for eid, in self.sqlexec('SELECT cw_eid FROM cw_%s '
- 'WHERE cw_from_entity=%%(eid)s OR '
- ' cw_to_entity=%%(eid)s' % rdeftype,
- {'eid': oldeid}, ask_confirm=False):
- # we should add deleted eids into pending eids else we may
- # get some validation error on commit since integrity hooks
- # may think some required relation is missing... This also ensure
- # repository caches are properly cleanup
- hook.CleanupDeletedEidsCacheOp.get_instance(session).add_data(eid)
- # and don't forget to remove record from system tables
- self.repo.system_source.delete_info(
- session, session.entity_from_eid(eid, rdeftype),
- 'system', None)
- thispending.add(eid)
- self.sqlexec('DELETE FROM cw_%s '
- 'WHERE cw_from_entity=%%(eid)s OR '
+ thispending = set( (eid for eid, in self.sqlexec(
+ 'SELECT cw_eid FROM cw_%s WHERE cw_from_entity=%%(eid)s OR '
+ ' cw_to_entity=%%(eid)s' % rdeftype,
+ {'eid': oldeid}, ask_confirm=False)) )
+ # we should add deleted eids into pending eids else we may
+ # get some validation error on commit since integrity hooks
+ # may think some required relation is missing... This also ensures
+ # repository caches are properly cleaned up
+ hook.CleanupDeletedEidsCacheOp.get_instance(session).union(thispending)
+ # and don't forget to remove record from system tables
+ entities = [session.entity_from_eid(eid, rdeftype) for eid in thispending]
+ self.repo.system_source.delete_info_multi(session, entities, 'system')
+ self.sqlexec('DELETE FROM cw_%s WHERE cw_from_entity=%%(eid)s OR '
'cw_to_entity=%%(eid)s' % rdeftype,
{'eid': oldeid}, ask_confirm=False)
# now we have to manually cleanup relations pointing to deleted
@@ -1143,6 +1139,10 @@
syncprops=syncprops)
else:
for etype in self.repo.schema.entities():
+ if etype.eid is None:
+ # final etype not yet added (e.g. BigInt defined in
+ # yams but the 3.13 migration has not been run yet)
+ continue
self._synchronize_eschema(etype, syncrdefs=syncrdefs,
syncprops=syncprops, syncperms=syncperms)
if commit:
--- a/server/repository.py Sat Jun 11 09:22:25 2011 +0200
+++ b/server/repository.py Wed Jun 15 17:16:19 2011 +0200
@@ -34,6 +34,7 @@
import sys
import threading
import Queue
+from warnings import warn
from itertools import chain
from os.path import join
from datetime import datetime
@@ -82,10 +83,6 @@
if rtype in schema.VIRTUAL_RTYPES or (rtype, 'object') in NO_CACHE_RELATIONS:
continue
entity.cw_set_relation_cache(rtype, 'object', session.empty_rset())
- # set inlined relation cache before call to after_add_entity
- for attr, value in relations:
- session.update_rel_cache_add(entity.eid, attr, value)
- del_existing_rel_if_needed(session, entity.eid, attr, value)
def del_existing_rel_if_needed(session, eidfrom, rtype, eidto):
"""delete existing relation when adding a new one if card is 1 or ?
@@ -96,14 +93,11 @@
this kind of behaviour has to be done in the repository so we don't have
hooks order hazardness
"""
- # skip that for internal session or if integrity explicitly disabled
- #
- # XXX we should imo rely on the orm to first fetch existing entity if any
- # then delete it.
- if session.is_internal_session \
- or not session.is_hook_category_activated('activeintegrity'):
+ # skip that if integrity explicitly disabled
+ if not session.is_hook_category_activated('activeintegrity'):
return
- card = session.schema_rproperty(rtype, eidfrom, eidto, 'cardinality')
+ rdef = session.rtype_eids_rdef(rtype, eidfrom, eidto)
+ card = rdef.cardinality
# one may be tented to check for neweids but this may cause more than one
# relation even with '1?' cardinality if thoses relations are added in the
# same transaction where the entity is being created. This never occurs from
@@ -115,7 +109,7 @@
# * inlined relations will be implicitly deleted for the subject entity
# * we don't want read permissions to be applied but we want delete
# permission to be checked
- if card[0] in '1?' and not session.repo.schema.rschema(rtype).inlined:
+ if card[0] in '1?':
with security_enabled(session, read=False):
session.execute('DELETE X %s Y WHERE X eid %%(x)s, '
'NOT Y eid %%(y)s' % rtype,
@@ -1118,17 +1112,6 @@
hook.CleanupDeletedEidsCacheOp.get_instance(session).add_data(entity.eid)
self._delete_info(session, entity, sourceuri, extid, scleanup)
- def delete_info_multi(self, session, entities, sourceuri, extids, scleanup=None):
- """same as delete_info but accepts a list of entities and
- extids with the same etype and belonging to the same source
- """
- # mark eid as being deleted in session info and setup cache update
- # operation
- op = hook.CleanupDeletedEidsCacheOp.get_instance(session)
- for entity in entities:
- op.add_data(entity.eid)
- self._delete_info_multi(session, entities, sourceuri, extids, scleanup)
-
def _delete_info(self, session, entity, sourceuri, extid, scleanup=None):
"""delete system information on deletion of an entity:
* delete all remaining relations from/to this entity
@@ -1160,16 +1143,15 @@
except:
self.exception('error while cascading delete for entity %s '
'from %s. RQL: %s', entity, sourceuri, rql)
- self.system_source.delete_info(session, entity, sourceuri, extid)
+ self.system_source.delete_info_multi(session, [entity], sourceuri)
- def _delete_info_multi(self, session, entities, sourceuri, extids, scleanup=None):
+ def _delete_info_multi(self, session, entities, sourceuri, scleanup=None):
"""same as _delete_info but accepts a list of entities with
the same etype and belinging to the same source.
"""
pendingrtypes = session.transaction_data.get('pendingrtypes', ())
# delete remaining relations: if user can delete the entity, he can
# delete all its relations without security checking
- assert entities and len(entities) == len(extids)
with security_enabled(session, read=False, write=False):
eids = [_e.eid for _e in entities]
in_eids = ','.join((str(eid) for eid in eids))
@@ -1191,7 +1173,7 @@
except:
self.exception('error while cascading delete for entity %s '
'from %s. RQL: %s', entities, sourceuri, rql)
- self.system_source.delete_info_multi(session, entities, sourceuri, extids)
+ self.system_source.delete_info_multi(session, entities, sourceuri)
def locate_relation_source(self, session, subject, rtype, object):
subjsource = self.source_from_eid(subject, session)
@@ -1255,16 +1237,24 @@
if server.DEBUG & server.DBG_REPO:
print 'ADD entity', self, entity.__regid__, entity.eid, edited
relations = []
+ prefill_entity_caches(entity, relations)
if source.should_call_hooks:
self.hm.call_hooks('before_add_entity', session, entity=entity)
+ activintegrity = session.is_hook_category_activated('activeintegrity')
for attr in edited.iterkeys():
rschema = eschema.subjrels[attr]
if not rschema.final: # inlined relation
- relations.append((attr, edited[attr]))
+ value = edited[attr]
+ relations.append((attr, value))
+ session.update_rel_cache_add(entity.eid, attr, value)
+ rdef = session.rtype_eids_rdef(attr, entity.eid, value)
+ if rdef.cardinality[1] in '1?' and activintegrity:
+ with security_enabled(session, read=False):
+ session.execute('DELETE X %s Y WHERE Y eid %%(y)s' % attr,
+ {'x': entity.eid, 'y': value})
edited.set_defaults()
if session.is_hook_category_activated('integrity'):
edited.check(creation=True)
- prefill_entity_caches(entity, relations)
try:
source.add_entity(session, entity)
except UniqueTogetherError, exc:
@@ -1366,6 +1356,16 @@
def glob_delete_entities(self, session, eids):
"""delete a list of entities and all related entities from the repository"""
+ # mark eids as being deleted in session info and setup cache update
+ # operation (register pending eids before actual deletion to avoid
+ # multiple call to glob_delete_entities)
+ op = hook.CleanupDeletedEidsCacheOp.get_instance(session)
+ if not isinstance(eids, (set, frozenset)):
+ warn('[3.13] eids should be given as a set', DeprecationWarning,
+ stacklevel=2)
+ eids = frozenset(eids)
+ eids = eids - op._container
+ op._container |= eids
data_by_etype_source = {} # values are ([list of eids],
# [list of extid],
# [list of entities])
@@ -1377,27 +1377,23 @@
for eid in eids:
etype, sourceuri, extid = self.type_and_source_from_eid(eid, session)
+ # XXX should cache entity's cw_metainformation
entity = session.entity_from_eid(eid, etype)
- _key = (etype, sourceuri)
- if _key not in data_by_etype_source:
- data_by_etype_source[_key] = ([eid], [extid], [entity])
- else:
- _data = data_by_etype_source[_key]
- _data[0].append(eid)
- _data[1].append(extid)
- _data[2].append(entity)
- for (etype, sourceuri), (eids, extids, entities) in data_by_etype_source.iteritems():
+ try:
+ data_by_etype_source[(etype, sourceuri)].append(entity)
+ except KeyError:
+ data_by_etype_source[(etype, sourceuri)] = [entity]
+ for (etype, sourceuri), entities in data_by_etype_source.iteritems():
if server.DEBUG & server.DBG_REPO:
- print 'DELETE entities', etype, eids
- #print 'DELETE entities', etype, len(eids)
+ print 'DELETE entities', etype, [entity.eid for entity in entities]
source = self.sources_by_uri[sourceuri]
if source.should_call_hooks:
self.hm.call_hooks('before_delete_entity', session, entities=entities)
- self._delete_info_multi(session, entities, sourceuri, extids) # xxx
+ self._delete_info_multi(session, entities, sourceuri)
source.delete_entities(session, entities)
if source.should_call_hooks:
self.hm.call_hooks('after_delete_entity', session, entities=entities)
- # don't clear cache here this is done in a hook on commit
+ # don't clear cache here, it is done in a hook on commit
def glob_add_relation(self, session, subject, rtype, object):
"""add a relation to the repository"""
@@ -1409,33 +1405,63 @@
relations is a dictionary rtype: [(subj_eid, obj_eid), ...]
"""
sources = {}
+ subjects_by_types = {}
+ objects_by_types = {}
+ activintegrity = session.is_hook_category_activated('activeintegrity')
for rtype, eids_subj_obj in relations.iteritems():
if server.DEBUG & server.DBG_REPO:
- for subject, object in relations:
- print 'ADD relation', subject, rtype, object
- for subject, object in eids_subj_obj:
- source = self.locate_relation_source(session, subject, rtype, object)
+ for subjeid, objeid in relations:
+ print 'ADD relation', subjeid, rtype, objeid
+ for subjeid, objeid in eids_subj_obj:
+ source = self.locate_relation_source(session, subjeid, rtype, objeid)
if source not in sources:
relations_by_rtype = {}
sources[source] = relations_by_rtype
else:
relations_by_rtype = sources[source]
if rtype in relations_by_rtype:
- relations_by_rtype[rtype].append((subject, object))
+ relations_by_rtype[rtype].append((subjeid, objeid))
else:
- relations_by_rtype[rtype] = [(subject, object)]
+ relations_by_rtype[rtype] = [(subjeid, objeid)]
+ if not activintegrity:
+ continue
+ # take care of relations of cardinality '?1': as all eids will
+ # be inserted later, we remove duplicate eids here since they
+ # won't be caught by `del_existing_rel_if_needed`
+ rdef = session.rtype_eids_rdef(rtype, subjeid, objeid)
+ card = rdef.cardinality
+ if card[0] in '?1':
+ with security_enabled(session, read=False):
+ session.execute('DELETE X %s Y WHERE X eid %%(x)s, '
+ 'NOT Y eid %%(y)s' % rtype,
+ {'x': subjeid, 'y': objeid})
+ subjects = subjects_by_types.setdefault(rdef, {})
+ if subjeid in subjects:
+ del relations_by_rtype[rtype][subjects[subjeid]]
+ subjects[subjeid] = len(relations_by_rtype[rtype]) - 1
+ continue
+ subjects[subjeid] = len(relations_by_rtype[rtype]) - 1
+ if card[1] in '?1':
+ with security_enabled(session, read=False):
+ session.execute('DELETE X %s Y WHERE Y eid %%(y)s, '
+ 'NOT X eid %%(x)s' % rtype,
+ {'x': subjeid, 'y': objeid})
+ objects = objects_by_types.setdefault(rdef, {})
+ if objeid in objects:
+ del relations_by_rtype[rtype][objects[objeid]]
+ objects[objeid] = len(relations_by_rtype[rtype])
+ continue
+ objects[objeid] = len(relations_by_rtype[rtype])
for source, relations_by_rtype in sources.iteritems():
if source.should_call_hooks:
for rtype, source_relations in relations_by_rtype.iteritems():
- for subject, object in source_relations:
- del_existing_rel_if_needed(session, subject, rtype, object)
self.hm.call_hooks('before_add_relation', session,
rtype=rtype, eids_from_to=source_relations)
for rtype, source_relations in relations_by_rtype.iteritems():
source.add_relations(session, rtype, source_relations)
rschema = self.schema.rschema(rtype)
- for subject, object in source_relations:
- session.update_rel_cache_add(subject, rtype, object, rschema.symmetric)
+ for subjeid, objeid in source_relations:
+ session.update_rel_cache_add(subjeid, rtype, objeid, rschema.symmetric)
if source.should_call_hooks:
for rtype, source_relations in relations_by_rtype.iteritems():
self.hm.call_hooks('after_add_relation', session,
--- a/server/session.py Sat Jun 11 09:22:25 2011 +0200
+++ b/server/session.py Wed Jun 15 17:16:19 2011 +0200
@@ -74,6 +74,13 @@
"""
return req.is_internal_session
+@objectify_selector
+def repairing(cls, req, **kwargs):
+ """repository side only selector returning 1 if the instance is running
+ in repair mode (i.e. config.repairing is set)
+ """
+ return req.vreg.config.repairing
+
class transaction(object):
"""context manager to enter a transaction for a session: when exiting the
@@ -476,12 +483,12 @@
"""
return eid in self.transaction_data.get('neweids', ())
- def schema_rproperty(self, rtype, eidfrom, eidto, rprop):
- rschema = self.repo.schema[rtype]
- subjtype = self.describe(eidfrom)[0]
- objtype = self.describe(eidto)[0]
- rdef = rschema.rdef(subjtype, objtype)
- return rdef.get(rprop)
+ def rtype_eids_rdef(self, rtype, eidfrom, eidto):
+ # use type_and_source_from_eid instead of type_from_eid for optimization
+ # (avoid two extra methods call)
+ subjtype = self.repo.type_and_source_from_eid(eidfrom, self)[0]
+ objtype = self.repo.type_and_source_from_eid(eidto, self)[0]
+ return self.vreg.schema.rschema(rtype).rdefs[(subjtype, objtype)]
# security control #########################################################
@@ -1190,6 +1197,10 @@
# deprecated ###############################################################
+ @deprecated('[3.13] use getattr(session.rtype_eids_rdef(rtype, eidfrom, eidto), prop)')
+ def schema_rproperty(self, rtype, eidfrom, eidto, rprop):
+ return getattr(self.rtype_eids_rdef(rtype, eidfrom, eidto), rprop)
+
@property
@deprecated("[3.13] use .cnxset attribute instead of .pool")
def pool(self):
--- a/server/sources/__init__.py Sat Jun 11 09:22:25 2011 +0200
+++ b/server/sources/__init__.py Wed Jun 15 17:16:19 2011 +0200
@@ -462,19 +462,12 @@
"""mark entity as being modified, fulltext reindex if needed"""
raise NotImplementedError()
- def delete_info(self, session, entity, uri, extid):
- """delete system information on deletion of an entity by transfering
- record from the entities table to the deleted_entities table
+ def delete_info_multi(self, session, entities, uri):
+ """delete system information on deletion of a list of entities with the
+ same etype and belonging to the same source
"""
raise NotImplementedError()
- def delete_info_multi(self, session, entities, uri, extids):
- """ame as delete_info but accepts a list of entities with
- the same etype and belinging to the same source.
- """
- for entity, extid in itertools.izip(entities, extids):
- self.delete_info(session, entity, uri, extid)
-
def modified_entities(self, session, etypes, mtime):
"""return a 2-uple:
* list of (etype, eid) of entities of the given types which have been
--- a/server/sources/native.py Sat Jun 11 09:22:25 2011 +0200
+++ b/server/sources/native.py Wed Jun 15 17:16:19 2011 +0200
@@ -970,31 +970,13 @@
attrs = {'eid': entity.eid, 'mtime': datetime.now()}
self.doexec(session, self.sqlgen.update('entities', attrs, ['eid']), attrs)
- def delete_info(self, session, entity, uri, extid):
- """delete system information on deletion of an entity:
+ def delete_info_multi(self, session, entities, uri):
+ """delete system information on deletion of a list of entities with the
+ same etype and belonging to the same source
+
* update the fti
- * remove record from the entities table
- * transfer it to the deleted_entities table if the entity's type is
- multi-sources
- """
- self.fti_unindex_entities(session, [entity])
- attrs = {'eid': entity.eid}
- self.doexec(session, self.sqlgen.delete('entities', attrs), attrs)
- if not entity.__regid__ in self.multisources_etypes:
- return
- if extid is not None:
- assert isinstance(extid, str), type(extid)
- extid = b64encode(extid)
- attrs = {'type': entity.__regid__, 'eid': entity.eid, 'extid': extid,
- 'source': uri, 'dtime': datetime.now()}
- self.doexec(session, self.sqlgen.insert('deleted_entities', attrs), attrs)
-
- def delete_info_multi(self, session, entities, uri, extids):
- """delete system information on deletion of an entity:
- * update the fti
- * remove record from the entities table
- * transfer it to the deleted_entities table if the entity's type is
- multi-sources
+ * remove record from the `entities` table
+ * transfer it to the `deleted_entities`
"""
self.fti_unindex_entities(session, entities)
attrs = {'eid': '(%s)' % ','.join([str(_e.eid) for _e in entities])}
@@ -1003,7 +985,8 @@
return
attrs = {'type': entities[0].__regid__,
'source': uri, 'dtime': datetime.now()}
- for entity, extid in itertools.izip(entities, extids):
+ for entity in entities:
+ extid = entity.cw_metainformation()['extid']
if extid is not None:
assert isinstance(extid, str), type(extid)
extid = b64encode(extid)
@@ -1320,7 +1303,7 @@
sql = self.sqlgen.delete(SQL_PREFIX + entity.__regid__, attrs)
self.doexec(session, sql, attrs)
# remove record from entities (will update fti if needed)
- self.delete_info(session, entity, self.uri, None)
+ self.delete_info_multi(session, [entity], self.uri)
self.repo.hm.call_hooks('after_delete_entity', session, entity=entity)
return ()
--- a/server/sources/pyrorql.py Sat Jun 11 09:22:25 2011 +0200
+++ b/server/sources/pyrorql.py Wed Jun 15 17:16:19 2011 +0200
@@ -314,9 +314,8 @@
def get_connection(self):
try:
return self._get_connection()
- except (ConnectionError, PyroError):
- self.critical("can't get connection to source %s", self.uri,
- exc_info=1)
+ except (ConnectionError, PyroError), ex:
+ self.critical("can't get connection to source %s: %s", self.uri, ex)
return ConnectionWrapper()
def check_connection(self, cnx):
--- a/server/ssplanner.py Sat Jun 11 09:22:25 2011 +0200
+++ b/server/ssplanner.py Wed Jun 15 17:16:19 2011 +0200
@@ -28,7 +28,6 @@
from cubicweb.schema import VIRTUAL_RTYPES
from cubicweb.rqlrewrite import add_types_restriction
from cubicweb.server.session import security_enabled
-from cubicweb.server.hook import CleanupDeletedEidsCacheOp
from cubicweb.server.edition import EditedEntity
READ_ONLY_RTYPES = set(('eid', 'has_text', 'is', 'is_instance_of', 'identity'))
@@ -521,13 +520,7 @@
if results:
todelete = frozenset(typed_eid(eid) for eid, in results)
session = self.plan.session
- # mark eids as being deleted in session info and setup cache update
- # operation (register pending eids before actual deletion to avoid
- # multiple call to glob_delete_entities)
- op = CleanupDeletedEidsCacheOp.get_instance(session)
- actual = todelete - op._container
- op._container |= actual
- session.repo.glob_delete_entities(session, actual)
+ session.repo.glob_delete_entities(session, todelete)
return results
class DeleteRelationsStep(Step):
--- a/server/test/unittest_repository.py Sat Jun 11 09:22:25 2011 +0200
+++ b/server/test/unittest_repository.py Wed Jun 15 17:16:19 2011 +0200
@@ -847,6 +847,29 @@
t1 = time.time()
self.info('add relations (inlined): %.2gs', t1-t0)
+ def test_optional_relation_reset_1(self):
+ req = self.request()
+ p1 = req.create_entity('Personne', nom=u'Vincent')
+ p2 = req.create_entity('Personne', nom=u'Florent')
+ w = req.create_entity('Affaire', ref=u'wc')
+ w.set_relations(todo_by=[p1,p2])
+ w.clear_all_caches()
+ self.commit()
+ self.assertEqual(len(w.todo_by), 1)
+ self.assertEqual(w.todo_by[0].eid, p2.eid)
+
+ def test_optional_relation_reset_2(self):
+ req = self.request()
+ p1 = req.create_entity('Personne', nom=u'Vincent')
+ p2 = req.create_entity('Personne', nom=u'Florent')
+ w = req.create_entity('Affaire', ref=u'wc')
+ w.set_relations(todo_by=p1)
+ self.commit()
+ w.set_relations(todo_by=p2)
+ w.clear_all_caches()
+ self.commit()
+ self.assertEqual(len(w.todo_by), 1)
+ self.assertEqual(w.todo_by[0].eid, p2.eid)
if __name__ == '__main__':
--- a/web/views/ibreadcrumbs.py Sat Jun 11 09:22:25 2011 +0200
+++ b/web/views/ibreadcrumbs.py Wed Jun 15 17:16:19 2011 +0200
@@ -195,7 +195,7 @@
def cell_call(self, row, col, **kwargs):
entity = self.cw_rset.get_entity(row, col)
- desc = xml_escape(uilib.cut(entity.dc_description(), 50))
+ desc = uilib.cut(entity.dc_description(), 50)
# NOTE remember camember: tags.a autoescapes
self.w(tags.a(entity.view('breadcrumbtext'),
href=entity.absolute_url(), title=desc))