--- a/devtools/apptest.py Fri Jul 31 23:26:52 2009 +0200
+++ b/devtools/apptest.py Sat Aug 01 00:01:12 2009 +0200
@@ -457,7 +457,6 @@
pactionsdict = EnvBasedTC.pactionsdict.im_func
# default test setup and teardown #########################################
- copy_schema = False
def _prepare(self):
MAILBOX[:] = [] # reset mailbox
@@ -470,16 +469,6 @@
self.__close = repo.close
self.cnxid = self.cnx.sessionid
self.session = repo._sessions[self.cnxid]
- if self.copy_schema:
- # XXX copy schema since hooks may alter it and it may be not fully
- # cleaned (missing some schema synchronization support)
- try:
- origschema = repo.__schema
- except AttributeError:
- repo.__schema = origschema = repo.schema
- repo.schema = deepcopy(origschema)
- repo.set_schema(repo.schema) # reset hooks
- repo.vreg.update_schema(repo.schema)
self.cnxs = []
# reset caches, they may introduce bugs among tests
repo._type_source_cache = {}
--- a/entities/schemaobjs.py Fri Jul 31 23:26:52 2009 +0200
+++ b/entities/schemaobjs.py Sat Aug 01 00:01:12 2009 +0200
@@ -112,6 +112,18 @@
return self.relation_type[0].rest_path(), {}
return super(CWRelation, self).after_deletion_path()
+ @property
+ def rtype(self):
+ return self.relation_type[0]
+
+ @property
+ def stype(self):
+ return self.from_entity[0]
+
+ @property
+ def otype(self):
+ return self.to_entity[0]
+
class CWAttribute(CWRelation):
id = 'CWAttribute'
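
The stype/rtype/otype properties added above are plain shortcuts over the relation
caches (from_entity[0], relation_type[0], to_entity[0]); the schema hooks later in this
patch use them to shorten their code. A standalone sketch of the accessor pattern,
using a stand-in class and plain strings rather than a real CWRelation entity:

    class FakeCWRelation(object):
        """stand-in mimicking the relation caches of a CWRelation entity"""
        def __init__(self, rtype, stype, otype):
            self.relation_type = [rtype]
            self.from_entity = [stype]
            self.to_entity = [otype]
        # same one-liner accessors as the properties added above
        rtype = property(lambda self: self.relation_type[0])
        stype = property(lambda self: self.from_entity[0])
        otype = property(lambda self: self.to_entity[0])

    rel = FakeCWRelation('concerne', 'Personne', 'Affaire')
    assert (rel.stype, rel.rtype, rel.otype) == ('Personne', 'concerne', 'Affaire')
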
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/misc/migration/3.3.5_Any.py Sat Aug 01 00:01:12 2009 +0200
@@ -0,0 +1,8 @@
+# some entities have been added before schema entities, fix the 'is' and
+# 'is_instance_of' relations
+for rtype in ('is', 'is_instance_of'):
+ sql('INSERT INTO %s_relation '
+ 'SELECT X.eid, ET.cw_eid FROM entities as X, cw_CWEType as ET '
+ 'WHERE X.type=ET.cw_name AND NOT EXISTS('
+        ' SELECT 1 FROM %s_relation '
+        ' WHERE eid_from=X.eid AND eid_to=ET.cw_eid)' % (rtype, rtype))
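
For reference, on the first iteration (rtype = 'is') the statement sent to the database
expands to the following SQL (reformatted here for readability):

    INSERT INTO is_relation
    SELECT X.eid, ET.cw_eid FROM entities as X, cw_CWEType as ET
    WHERE X.type=ET.cw_name
      AND NOT EXISTS(SELECT 1 FROM is_relation
                     WHERE eid_from=X.eid AND eid_to=ET.cw_eid)
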
--- a/schema.py Fri Jul 31 23:26:52 2009 +0200
+++ b/schema.py Sat Aug 01 00:01:12 2009 +0200
@@ -34,6 +34,9 @@
schema.use_py_datetime()
nodes.use_py_datetime()
+PURE_VIRTUAL_RTYPES = set(('identity', 'has_text',))
+VIRTUAL_RTYPES = set(('eid', 'identity', 'has_text',))
+
# set of meta-relations available for every entity types
META_RELATIONS_TYPES = set((
'owned_by', 'created_by', 'is', 'is_instance_of', 'identity',
@@ -866,9 +869,9 @@
"""
self.info('loading %s schemas', ', '.join(config.cubes()))
if config.apphome:
- path = reversed([config.apphome] + config.cubes_path())
+ path = tuple(reversed([config.apphome] + config.cubes_path()))
else:
- path = reversed(config.cubes_path())
+ path = tuple(reversed(config.cubes_path()))
try:
return super(CubicWebSchemaLoader, self).load(config, path=path, **kwargs)
finally:
--- a/schemas/workflow.py Fri Jul 31 23:26:52 2009 +0200
+++ b/schemas/workflow.py Sat Aug 01 00:01:12 2009 +0200
@@ -119,7 +119,7 @@
"""indicate the current state of an entity"""
# not inlined intentionnaly since when using ldap sources, user'state
# has to be stored outside the CWUser table
-
+ inlined = False
# add/delete perms given to managers/users, after what most of the job
# is done by workflow enforcment
permissions = {
--- a/server/__init__.py Fri Jul 31 23:26:52 2009 +0200
+++ b/server/__init__.py Sat Aug 01 00:01:12 2009 +0200
@@ -15,8 +15,40 @@
from logilab.common.modutils import LazyObject
+# server-side debugging #########################################################
+
# server debugging flag
-DEBUG = False
+DBG_RQL = 1 # rql execution information
+DBG_SQL = 2 # executed sql
+DBG_REPO = 4 # repository events
+DBG_MORE = 8 # additional debug information
+
+
+# current debug mode, a bit-wise combination of the DBG_* flags above
+DEBUG = 0
+def set_debug(debugmode):
+ global DEBUG
+ if not debugmode:
+ DEBUG = 0
+ return
+ if isinstance(debugmode, basestring):
+ debugmode = globals()[debugmode]
+ DEBUG |= debugmode
+
+def debugged(func):
+    """decorator to activate rql/sql debug mode while the decorated function runs"""
+    def wrapped(*args, **kwargs):
+        global DEBUG
+        olddebug = DEBUG
+        DEBUG |= (DBG_RQL | DBG_SQL)
+        try:
+            return func(*args, **kwargs)
+        finally:
+            DEBUG = olddebug
+    return wrapped
+
+
+# database initialization ######################################################
def init_repository(config, interactive=True, drop=False, vreg=None):
"""initialise a repository database by creating tables add filling them
@@ -92,14 +124,12 @@
else:
login, pwd = unicode(source['db-user']), source['db-password']
print '-> inserting default user and default groups.'
- needisfix = []
- for group in BASE_GROUPS:
+    # sort for eid predictability as expected in some server tests
+ for group in sorted(BASE_GROUPS):
rset = session.execute('INSERT CWGroup X: X name %(name)s',
{'name': unicode(group)})
- needisfix.append( (rset.rows[0][0], rset.description[0][0]) )
rset = session.execute('INSERT CWUser X: X login %(login)s, X upassword %(pwd)s',
{'login': login, 'pwd': pwd})
- needisfix.append( (rset.rows[0][0], rset.description[0][0]) )
session.execute('SET U in_group G WHERE G name "managers"')
session.commit()
# reloging using the admin user
@@ -109,13 +139,6 @@
handler = config.migration_handler(schema, interactive=False,
cnx=cnx, repo=repo)
initialize_schema(config, schema, handler)
- # admin user and groups have been added before schema entities, fix the 'is'
- # relation
- for eid, etype in needisfix:
- handler.session.unsafe_execute('SET X is E WHERE X eid %(x)s, E name %(name)s',
- {'x': eid, 'name': etype}, 'x')
- handler.session.unsafe_execute('SET X is_instance_of E WHERE X eid %(x)s, E name %(name)s',
- {'x': eid, 'name': etype}, 'x')
# insert versions
handler.cmd_add_entity('CWProperty', pkey=u'system.version.cubicweb',
value=unicode(config.cubicweb_version()))
@@ -123,6 +146,15 @@
handler.cmd_add_entity('CWProperty',
pkey=u'system.version.%s' % cube.lower(),
value=unicode(config.cube_version(cube)))
+ # some entities have been added before schema entities, fix the 'is' and
+ # 'is_instance_of' relations
+ for rtype in ('is', 'is_instance_of'):
+ handler.sqlexec(
+ 'INSERT INTO %s_relation '
+ 'SELECT X.eid, ET.cw_eid FROM entities as X, cw_CWEType as ET '
+ 'WHERE X.type=ET.cw_name AND NOT EXISTS('
+            ' SELECT 1 FROM %s_relation '
+            ' WHERE eid_from=X.eid AND eid_to=ET.cw_eid)' % (rtype, rtype))
# yoo !
cnx.commit()
config.enabled_sources = None
@@ -161,20 +193,6 @@
for path in reversed(paths):
mhandler.exec_event_script('post%s' % event, path)
-def set_debug(debugmode):
- global DEBUG
- DEBUG = debugmode
-
-def debugged(func):
- """decorator to activate debug mode"""
- def wrapped(*args, **kwargs):
- global DEBUG
- DEBUG = True
- try:
- return func(*args, **kwargs)
- finally:
- DEBUG = False
- return wrapped
# sqlite'stored procedures have to be registered at connexion opening time
SQL_CONNECT_HOOKS = {}
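
With DEBUG turned into a bit field, several debugging categories can be enabled at
once. A hedged usage sketch (the flag names and the set_debug()/DEBUG API come from the
hunk above; the surrounding repository setup is elided):

    from cubicweb import server

    # enable rql and sql tracing, by value or by name
    server.set_debug(server.DBG_RQL | server.DBG_SQL)
    server.set_debug('DBG_MORE')       # string form is resolved through globals()

    if server.DEBUG & server.DBG_SQL:
        print 'sql tracing is on'

    server.set_debug(0)                # a false value resets DEBUG to 0
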
--- a/server/checkintegrity.py Fri Jul 31 23:26:52 2009 +0200
+++ b/server/checkintegrity.py Sat Aug 01 00:01:12 2009 +0200
@@ -13,6 +13,7 @@
from logilab.common.shellutils import ProgressBar
+from cubicweb.schema import PURE_VIRTUAL_RTYPES
from cubicweb.server.sqlutils import SQL_PREFIX
def has_eid(sqlcursor, eid, eids):
@@ -196,9 +197,7 @@
"""check all relations registered in the repo system table"""
print 'Checking relations'
for rschema in schema.relations():
- if rschema.is_final():
- continue
- if rschema == 'identity':
+ if rschema.is_final() or rschema in PURE_VIRTUAL_RTYPES:
continue
if rschema.inlined:
for subjtype in rschema.subjects():
--- a/server/hookhelper.py Fri Jul 31 23:26:52 2009 +0200
+++ b/server/hookhelper.py Sat Aug 01 00:01:12 2009 +0200
@@ -76,6 +76,8 @@
relation hooks, the relation may has been deleted at this point, so
we have handle that
"""
+ if eid in session.transaction_data.get('neweids', ()):
+ return
pending = session.transaction_data.get('pendingrelations', ())
for eidfrom, rtype, eidto in reversed(pending):
if rtype == 'in_state' and eidfrom == eid:
--- a/server/hooks.py Fri Jul 31 23:26:52 2009 +0200
+++ b/server/hooks.py Sat Aug 01 00:01:12 2009 +0200
@@ -17,69 +17,89 @@
get_user_sessions, rproperty)
from cubicweb.server.repository import FTIndexEntityOp
+# special relations that don't have to be checked for integrity, usually
+# because they are handled internally by hooks (so we trust ourselves)
+DONT_CHECK_RTYPES_ON_ADD = set(('owned_by', 'created_by',
+ 'is', 'is_instance_of',
+ 'wf_info_for', 'from_state', 'to_state'))
+DONT_CHECK_RTYPES_ON_DEL = set(('is', 'is_instance_of',
+ 'wf_info_for', 'from_state', 'to_state'))
+
+
def relation_deleted(session, eidfrom, rtype, eidto):
session.transaction_data.setdefault('pendingrelations', []).append(
(eidfrom, rtype, eidto))
+def eschema_type_eid(session, etype):
+ """get eid of the CWEType entity for the given yams type"""
+ eschema = session.repo.schema.eschema(etype)
+    # eschema.eid is None if the schema has been read from the filesystem, not
+ # from the database (eg during tests)
+ if eschema.eid is None:
+ eschema.eid = session.unsafe_execute(
+ 'Any X WHERE X is CWEType, X name %(name)s', {'name': etype})[0][0]
+ return eschema.eid
-# base meta-data handling #####################################################
+
+# base meta-data handling ######################################################
def setctime_before_add_entity(session, entity):
"""before create a new entity -> set creation and modification date
this is a conveniency hook, you shouldn't have to disable it
"""
- if not 'creation_date' in entity:
- entity['creation_date'] = datetime.now()
- if not 'modification_date' in entity:
- entity['modification_date'] = datetime.now()
- if not 'cwuri' in entity:
- if not session.get_shared_data('do-not-insert-cwuri'):
- entity['cwuri'] = session.base_url() + u'eid/%s' % entity.eid
+ timestamp = datetime.now()
+ entity.setdefault('creation_date', timestamp)
+ entity.setdefault('modification_date', timestamp)
+ if not session.get_shared_data('do-not-insert-cwuri'):
+ entity.setdefault('cwuri', u'%seid/%s' % (session.base_url(), entity.eid))
+
def setmtime_before_update_entity(session, entity):
"""update an entity -> set modification date"""
- if not 'modification_date' in entity:
- entity['modification_date'] = datetime.now()
+ entity.setdefault('modification_date', datetime.now())
+
class SetCreatorOp(PreCommitOperation):
def precommit_event(self):
- if self.eid in self.session.transaction_data.get('pendingeids', ()):
+ session = self.session
+ if self.entity.eid in session.transaction_data.get('pendingeids', ()):
# entity have been created and deleted in the same transaction
return
- ueid = self.session.user.eid
- execute = self.session.unsafe_execute
- if not execute('Any X WHERE X created_by U, X eid %(x)s',
- {'x': self.eid}, 'x'):
- execute('SET X created_by U WHERE X eid %(x)s, U eid %(u)s',
- {'x': self.eid, 'u': ueid}, 'x')
+ if not self.entity.created_by:
+ session.add_relation(self.entity.eid, 'created_by', session.user.eid)
+
def setowner_after_add_entity(session, entity):
"""create a new entity -> set owner and creator metadata"""
asession = session.actual_session()
if not asession.is_internal_session:
- session.unsafe_execute('SET X owned_by U WHERE X eid %(x)s, U eid %(u)s',
- {'x': entity.eid, 'u': asession.user.eid}, 'x')
- SetCreatorOp(asession, eid=entity.eid)
+ session.add_relation(entity.eid, 'owned_by', asession.user.eid)
+ SetCreatorOp(asession, entity=entity)
+
def setis_after_add_entity(session, entity):
"""create a new entity -> set is relation"""
if hasattr(entity, '_cw_recreating'):
return
- session.unsafe_execute('SET X is E WHERE X eid %(x)s, E name %(name)s',
- {'x': entity.eid, 'name': entity.id}, 'x')
+ try:
+ session.add_relation(entity.eid, 'is',
+ eschema_type_eid(session, entity.id))
+ except IndexError:
+ # during schema serialization, skip
+ return
# XXX < 2.50 bw compat
if not session.get_shared_data('do-not-insert-is_instance_of'):
- basetypes = entity.e_schema.ancestors() + [entity.e_schema]
- session.unsafe_execute('SET X is_instance_of E WHERE X eid %%(x)s, E name IN (%s)' %
- ','.join("'%s'" % str(etype) for etype in basetypes),
- {'x': entity.eid}, 'x')
+ for etype in entity.e_schema.ancestors() + [entity.e_schema]:
+ session.add_relation(entity.eid, 'is_instance_of',
+ eschema_type_eid(session, etype))
+
def setowner_after_add_user(session, entity):
"""when a user has been created, add owned_by relation on itself"""
- session.unsafe_execute('SET X owned_by X WHERE X eid %(x)s',
- {'x': entity.eid}, 'x')
+ session.add_relation(entity.eid, 'owned_by', entity.eid)
+
def fti_update_after_add_relation(session, eidfrom, rtype, eidto):
"""sync fulltext index when relevant relation is added. Reindexing the
@@ -91,6 +111,8 @@
FTIndexEntityOp(session, entity=session.entity_from_eid(eidto))
elif ftcontainer == 'object':
FTIndexEntityOp(session, entity=session.entity_from_eid(eidfrom))
+
+
def fti_update_after_delete_relation(session, eidfrom, rtype, eidto):
"""sync fulltext index when relevant relation is deleted. Reindexing both
entities is necessary.
@@ -99,6 +121,7 @@
FTIndexEntityOp(session, entity=session.entity_from_eid(eidto))
FTIndexEntityOp(session, entity=session.entity_from_eid(eidfrom))
+
class SyncOwnersOp(PreCommitOperation):
def precommit_event(self):
@@ -107,12 +130,13 @@
{'c': self.compositeeid, 'x': self.composedeid},
('c', 'x'))
+
def sync_owner_after_add_composite_relation(session, eidfrom, rtype, eidto):
"""when adding composite relation, the composed should have the same owners
has the composite
"""
if rtype == 'wf_info_for':
- # skip this special composite relation
+ # skip this special composite relation # XXX (syt) why?
return
composite = rproperty(session, rtype, eidfrom, eidto, 'composite')
if composite == 'subject':
@@ -120,6 +144,7 @@
elif composite == 'object':
SyncOwnersOp(session, compositeeid=eidto, composedeid=eidfrom)
+
def _register_metadata_hooks(hm):
"""register meta-data related hooks on the hooks manager"""
hm.register_hook(setctime_before_add_entity, 'before_add_entity', '')
@@ -133,6 +158,7 @@
if 'CWUser' in hm.schema:
hm.register_hook(setowner_after_add_user, 'after_add_entity', 'CWUser')
+
# core hooks ##################################################################
class DelayedDeleteOp(PreCommitOperation):
@@ -142,12 +168,15 @@
def precommit_event(self):
session = self.session
- if not self.eid in session.transaction_data.get('pendingeids', ()):
+ # don't do anything if the entity is being created or deleted
+ if not (self.eid in session.transaction_data.get('pendingeids', ()) or
+ self.eid in session.transaction_data.get('neweids', ())):
etype = session.describe(self.eid)[0]
session.unsafe_execute('DELETE %s X WHERE X eid %%(x)s, NOT %s'
% (etype, self.relation),
{'x': self.eid}, 'x')
+
def handle_composite_before_del_relation(session, eidfrom, rtype, eidto):
"""delete the object of composite relation"""
composite = rproperty(session, rtype, eidfrom, eidto, 'composite')
@@ -156,6 +185,7 @@
elif composite == 'object':
DelayedDeleteOp(session, eid=eidfrom, relation='X %s Y' % rtype)
+
def before_del_group(session, eid):
"""check that we don't remove the owners group"""
check_internal_entity(session, eid, ('owners',))
@@ -185,6 +215,7 @@
def commit_event(self):
pass
+
def cstrcheck_after_add_relation(session, eidfrom, rtype, eidto):
"""check the relation satisfy its constraints
@@ -210,9 +241,6 @@
raise ValidationError(entity.eid, {attr: msg % val})
-
-
-
class CheckRequiredRelationOperation(LateOperation):
"""checking relation cardinality has to be done after commit in
case the relation is being replaced
@@ -227,9 +255,8 @@
etype = self.session.describe(self.eid)[0]
_ = self.session._
msg = _('at least one relation %(rtype)s is required on %(etype)s (%(eid)s)')
- raise ValidationError(self.eid, {self.rtype: msg % {'rtype': _(self.rtype),
- 'etype': _(etype),
- 'eid': self.eid}})
+ msg %= {'rtype': _(self.rtype), 'etype': _(etype), 'eid': self.eid}
+ raise ValidationError(self.eid, {self.rtype: msg})
def commit_event(self):
pass
@@ -237,16 +264,19 @@
def _rql(self):
raise NotImplementedError()
+
class CheckSRelationOp(CheckRequiredRelationOperation):
"""check required subject relation"""
def _rql(self):
return 'Any O WHERE S eid %%(x)s, S %s O' % self.rtype, {'x': self.eid}, 'x'
+
class CheckORelationOp(CheckRequiredRelationOperation):
"""check required object relation"""
def _rql(self):
return 'Any S WHERE O eid %%(x)s, S %s O' % self.rtype, {'x': self.eid}, 'x'
+
def checkrel_if_necessary(session, opcls, rtype, eid):
"""check an equivalent operation has not already been added"""
for op in session.pending_operations:
@@ -255,12 +285,13 @@
else:
opcls(session, rtype=rtype, eid=eid)
+
def cardinalitycheck_after_add_entity(session, entity):
"""check cardinalities are satisfied"""
eid = entity.eid
for rschema, targetschemas, x in entity.e_schema.relation_definitions():
# skip automatically handled relations
- if rschema.type in ('owned_by', 'created_by', 'is', 'is_instance_of'):
+ if rschema.type in DONT_CHECK_RTYPES_ON_ADD:
continue
if x == 'subject':
subjtype = entity.e_schema
@@ -276,8 +307,11 @@
if card[cardindex] in '1+':
checkrel_if_necessary(session, opcls, rschema.type, eid)
+
def cardinalitycheck_before_del_relation(session, eidfrom, rtype, eidto):
"""check cardinalities are satisfied"""
+ if rtype in DONT_CHECK_RTYPES_ON_DEL:
+ return
card = rproperty(session, rtype, eidfrom, eidto, 'cardinality')
pendingeids = session.transaction_data.get('pendingeids', ())
if card[0] in '1+' and not eidfrom in pendingeids:
@@ -314,6 +348,7 @@
Operation.__init__(self, session, *args, **kwargs)
self.group = result[0][0]
+
class DeleteGroupOp(GroupOperation):
"""synchronize user when a in_group relation has been deleted"""
def commit_event(self):
@@ -325,6 +360,7 @@
self.error('user %s not in group %s', self.cnxuser, self.group)
return
+
def after_del_in_group(session, fromeid, rtype, toeid):
"""modify user permission, need to update users"""
for session_ in get_user_sessions(session.repo, fromeid):
@@ -342,6 +378,7 @@
return
groups.add(self.group)
+
def after_add_in_group(session, fromeid, rtype, toeid):
"""modify user permission, need to update users"""
for session_ in get_user_sessions(session.repo, fromeid):
@@ -361,11 +398,13 @@
except BadConnectionId:
pass # already closed
+
def after_del_user(session, eid):
"""modify user permission, need to update users"""
for session_ in get_user_sessions(session.repo, eid):
DelUserOp(session, session_.id)
+
def _register_usergroup_hooks(hm):
"""register user/group related hooks on the hooks manager"""
hm.register_hook(after_del_user, 'after_delete_entity', 'CWUser')
@@ -424,19 +463,20 @@
def precommit_event(self):
session = self.session
entity = self.entity
- rset = session.execute('Any S WHERE ET initial_state S, ET name %(name)s',
- {'name': str(entity.e_schema)})
# if there is an initial state and the entity's state is not set,
# use the initial state as a default state
pendingeids = session.transaction_data.get('pendingeids', ())
- if rset and not entity.eid in pendingeids and not entity.in_state:
- session.unsafe_execute('SET X in_state S WHERE X eid %(x)s, S eid %(s)s',
- {'x' : entity.eid, 's' : rset[0][0]}, 'x')
+ if not entity.eid in pendingeids and not entity.in_state:
+ rset = session.execute('Any S WHERE ET initial_state S, ET name %(name)s',
+ {'name': entity.id})
+ if rset:
+ session.add_relation(entity.eid, 'in_state', rset[0][0])
def set_initial_state_after_add(session, entity):
SetInitialStateOp(session, entity=entity)
+
def _register_wf_hooks(hm):
"""register workflow related hooks on the hooks manager"""
if 'in_state' in hm.schema:
@@ -461,6 +501,7 @@
except KeyError:
self.error('%s has no associated value', self.key)
+
class ChangeCWPropertyOp(Operation):
"""a user's custom properties has been added/changed"""
@@ -468,6 +509,7 @@
"""the observed connections pool has been commited"""
self.epropdict[self.key] = self.value
+
class AddCWPropertyOp(Operation):
"""a user's custom properties has been added/changed"""
@@ -478,6 +520,7 @@
self.repo.vreg.eprop_values[eprop.pkey] = eprop.value
# if for_user is set, update is handled by a ChangeCWPropertyOp operation
+
def after_add_eproperty(session, entity):
key, value = entity.pkey, entity.value
try:
@@ -487,11 +530,11 @@
except ValueError, ex:
raise ValidationError(entity.eid, {'value': session._(str(ex))})
if not session.user.matching_groups('managers'):
- session.unsafe_execute('SET P for_user U WHERE P eid %(x)s,U eid %(u)s',
- {'x': entity.eid, 'u': session.user.eid}, 'x')
+ session.add_relation(entity.eid, 'for_user', session.user.eid)
else:
AddCWPropertyOp(session, eprop=entity)
+
def after_update_eproperty(session, entity):
key, value = entity.pkey, entity.value
try:
@@ -509,6 +552,7 @@
ChangeCWPropertyOp(session, epropdict=session.vreg.eprop_values,
key=key, value=value)
+
def before_del_eproperty(session, eid):
for eidfrom, rtype, eidto in session.transaction_data.get('pendingrelations', ()):
if rtype == 'for_user' and eidfrom == eid:
@@ -519,6 +563,7 @@
{'x': eid}, 'x')[0][0]
DelCWPropertyOp(session, epropdict=session.vreg.eprop_values, key=key)
+
def after_add_for_user(session, fromeid, rtype, toeid):
if not session.describe(fromeid)[0] == 'CWProperty':
return
@@ -531,6 +576,7 @@
ChangeCWPropertyOp(session, epropdict=session_.user.properties,
key=key, value=value)
+
def before_del_for_user(session, fromeid, rtype, toeid):
key = session.execute('Any K WHERE P eid %(x)s, P pkey K',
{'x': fromeid}, 'x')[0][0]
@@ -538,6 +584,7 @@
for session_ in get_user_sessions(session.repo, toeid):
DelCWPropertyOp(session, epropdict=session_.user.properties, key=key)
+
def _register_eproperty_hooks(hm):
"""register workflow related hooks on the hooks manager"""
hm.register_hook(after_add_eproperty, 'after_add_entity', 'CWProperty')
--- a/server/migractions.py Fri Jul 31 23:26:52 2009 +0200
+++ b/server/migractions.py Sat Aug 01 00:01:12 2009 +0200
@@ -30,7 +30,7 @@
from yams.schema2sql import eschema2sql, rschema2sql
from cubicweb import AuthenticationError, ETYPE_NAME_MAP
-from cubicweb.schema import CubicWebRelationSchema
+from cubicweb.schema import VIRTUAL_RTYPES, CubicWebRelationSchema
from cubicweb.dbapi import get_repository, repo_connect
from cubicweb.common.migration import MigrationHelper, yes
@@ -159,6 +159,7 @@
except (KeyboardInterrupt, EOFError):
print 'aborting...'
sys.exit(0)
+ self.session.keep_pool_mode('transaction')
return self._cnx
@property
@@ -240,7 +241,7 @@
def _synchronize_permissions(self, ertype):
"""permission synchronization for an entity or relation type"""
- if ertype in ('eid', 'has_text', 'identity'):
+ if ertype in VIRTUAL_RTYPES:
return
newrschema = self.fs_schema[ertype]
teid = self.repo.schema[ertype].eid
--- a/server/querier.py Fri Jul 31 23:26:52 2009 +0200
+++ b/server/querier.py Sat Aug 01 00:01:12 2009 +0200
@@ -24,6 +24,8 @@
from cubicweb.server.rqlannotation import SQLGenAnnotator, set_qdata
from cubicweb.server.ssplanner import add_types_restriction
+READ_ONLY_RTYPES = set(('eid', 'has_text', 'is', 'is_instance_of', 'identity'))
+
def empty_rset(session, rql, args, rqlst=None):
"""build an empty result set object"""
return ResultSet([], rql, args, rqlst=rqlst)
@@ -67,7 +69,7 @@
if rqlst.where is not None:
for rel in rqlst.where.iget_nodes(Relation):
# XXX has_text may have specific perm ?
- if rel.r_type in ('is', 'is_instance_of', 'has_text', 'identity', 'eid'):
+ if rel.r_type in READ_ONLY_RTYPES:
continue
if not schema.rschema(rel.r_type).has_access(user, 'read'):
raise Unauthorized('read', rel.r_type)
@@ -189,8 +191,6 @@
return rqlst to actually execute
"""
- #if server.DEBUG:
- # print '------- preprocessing', union.as_string('utf8')
noinvariant = set()
if security and not self.session.is_super_session:
self._insert_security(union, noinvariant)
@@ -373,7 +373,7 @@
for relation in rqlst.main_relations:
lhs, rhs = relation.get_variable_parts()
rtype = relation.r_type
- if rtype in ('eid', 'has_text', 'is', 'is_instance_of', 'identity'):
+ if rtype in READ_ONLY_RTYPES:
raise QueryError("can't assign to %s" % rtype)
try:
edef = to_build[str(lhs)]
@@ -586,9 +586,9 @@
always use substitute arguments in queries (eg avoid query such as
'Any X WHERE X eid 123'!)
"""
- if server.DEBUG:
+        if server.DEBUG & (server.DBG_RQL | server.DBG_SQL):
print '*'*80
- print rql
+ print 'QUERIER INPUT', rql
# parse the query and binds variables
if eid_key is not None:
if not isinstance(eid_key, (tuple, list)):
--- a/server/repository.py Fri Jul 31 23:26:52 2009 +0200
+++ b/server/repository.py Sat Aug 01 00:01:12 2009 +0200
@@ -34,8 +34,8 @@
ExecutionError, typed_eid,
CW_MIGRATION_MAP)
from cubicweb.cwvreg import CubicWebRegistry
-from cubicweb.schema import CubicWebSchema
-
+from cubicweb.schema import VIRTUAL_RTYPES, CubicWebSchema
+from cubicweb import server
from cubicweb.server.utils import RepoThread, LoopTask
from cubicweb.server.pool import ConnectionsPool, LateOperation, SingleLastOperation
from cubicweb.server.session import Session, InternalSession
@@ -115,7 +115,6 @@
# the web interface but may occurs during test or dbapi connection (though
# not expected for this). So: don't do it, we pretend to ensure repository
# consistency.
- # XXX should probably not use unsafe_execute!
if card[0] in '1?':
rschema = session.repo.schema.rschema(rtype)
if not rschema.inlined:
@@ -935,7 +934,7 @@
eschema = self.schema.eschema(etype)
for rschema, targetschemas, x in eschema.relation_definitions():
rtype = rschema.type
- if rtype == 'identity':
+ if rtype in VIRTUAL_RTYPES:
continue
var = '%s%s' % (rtype.upper(), x.upper())
if x == 'subject':
@@ -988,6 +987,8 @@
source = self.locate_etype_source(etype)
# attribute an eid to the entity before calling hooks
entity.set_eid(self.system_source.create_eid(session))
+ if server.DEBUG & server.DBG_REPO:
+ print 'ADD entity', etype, entity.eid, dict(entity)
entity._is_saved = False # entity has an eid but is not yet saved
relations = []
# if inlined relations are specified, fill entity's related cache to
@@ -1026,9 +1027,10 @@
"""replace an entity in the repository
the type and the eid of an entity must not be changed
"""
- #print 'update', entity
+ etype = str(entity.e_schema)
+ if server.DEBUG & server.DBG_REPO:
+ print 'UPDATE entity', etype, entity.eid, dict(entity)
entity.check()
- etype = str(entity.e_schema)
eschema = entity.e_schema
only_inline_rels, need_fti_update = True, False
relations = []
@@ -1079,10 +1081,11 @@
def glob_delete_entity(self, session, eid):
"""delete an entity and all related entities from the repository"""
- #print 'deleting', eid
# call delete_info before hooks
self._prepare_delete_info(session, eid)
etype, uri, extid = self.type_and_source_from_eid(eid, session)
+ if server.DEBUG & server.DBG_REPO:
+ print 'DELETE entity', etype, eid
source = self.sources_by_uri[uri]
if source.should_call_hooks:
self.hm.call_hooks('before_delete_entity', etype, session, eid)
@@ -1094,11 +1097,9 @@
def glob_add_relation(self, session, subject, rtype, object):
"""add a relation to the repository"""
- assert subject is not None
- assert rtype
- assert object is not None
+ if server.DEBUG & server.DBG_REPO:
+ print 'ADD relation', subject, rtype, object
source = self.locate_relation_source(session, subject, rtype, object)
- #print 'adding', subject, rtype, object, 'to', source
if source.should_call_hooks:
del_existing_rel_if_needed(session, subject, rtype, object)
self.hm.call_hooks('before_add_relation', rtype, session,
@@ -1110,11 +1111,9 @@
def glob_delete_relation(self, session, subject, rtype, object):
"""delete a relation from the repository"""
- assert subject is not None
- assert rtype
- assert object is not None
+ if server.DEBUG & server.DBG_REPO:
+ print 'DELETE relation', subject, rtype, object
source = self.locate_relation_source(session, subject, rtype, object)
- #print 'delete rel', subject, rtype, object
if source.should_call_hooks:
self.hm.call_hooks('before_delete_relation', rtype, session,
subject, rtype, object)
--- a/server/schemahooks.py Fri Jul 31 23:26:52 2009 +0200
+++ b/server/schemahooks.py Sat Aug 01 00:01:12 2009 +0200
@@ -371,12 +371,12 @@
def precommit_event(self):
session = self.session
entity = self.entity
- fromentity = entity.from_entity[0]
- relationtype = entity.relation_type[0]
+ fromentity = entity.stype
+ relationtype = entity.rtype
session.execute('SET X ordernum Y+1 WHERE X from_entity SE, SE eid %(se)s, X ordernum Y, X ordernum >= %(order)s, NOT X eid %(x)s',
{'x': entity.eid, 'se': fromentity.eid, 'order': entity.ordernum or 0})
subj, rtype = str(fromentity.name), str(relationtype.name)
- obj = str(entity.to_entity[0].name)
+ obj = str(entity.otype.name)
# at this point default is a string or None, but we need a correctly
# typed value
default = entity.defaultval
@@ -444,12 +444,12 @@
def precommit_event(self):
session = self.session
entity = self.entity
- fromentity = entity.from_entity[0]
- relationtype = entity.relation_type[0]
+        fromentity = entity.stype
+ relationtype = entity.rtype
session.execute('SET X ordernum Y+1 WHERE X from_entity SE, SE eid %(se)s, X ordernum Y, X ordernum >= %(order)s, NOT X eid %(x)s',
{'x': entity.eid, 'se': fromentity.eid, 'order': entity.ordernum or 0})
subj, rtype = str(fromentity.name), str(relationtype.name)
- obj = str(entity.to_entity[0].name)
+ obj = str(entity.otype.name)
card = entity.get('cardinality')
rdef = RelationDefinition(subj, rtype, obj,
cardinality=card,
@@ -586,8 +586,8 @@
def after_update_erdef(session, entity):
- desttype = entity.to_entity[0].name
- rschema = session.repo.schema[entity.relation_type[0].name]
+ desttype = entity.otype.name
+ rschema = session.repo.schema[entity.rtype.name]
newvalues = {}
for prop in rschema.rproperty_defs(desttype):
if prop == 'constraints':
@@ -597,7 +597,7 @@
if prop in entity:
newvalues[prop] = entity[prop]
if newvalues:
- subjtype = entity.from_entity[0].name
+ subjtype = entity.stype.name
UpdateRelationDefOp(session, (subjtype, desttype),
rschema=rschema, values=newvalues)
--- a/server/schemaserial.py Fri Jul 31 23:26:52 2009 +0200
+++ b/server/schemaserial.py Sat Aug 01 00:01:12 2009 +0200
@@ -14,7 +14,7 @@
from yams import schema as schemamod, buildobjs as ybo
-from cubicweb.schema import CONSTRAINTS, ETYPE_NAME_MAP
+from cubicweb.schema import CONSTRAINTS, ETYPE_NAME_MAP, VIRTUAL_RTYPES
from cubicweb.server import sqlutils
def group_mapping(cursor, interactive=True):
@@ -284,17 +284,17 @@
if not verbose:
pb_size = len(aller) + len(CONSTRAINTS) + len([x for x in eschemas if x.specializes()])
pb = ProgressBar(pb_size, title=_title)
+ rql = 'INSERT CWConstraintType X: X name %(ct)s'
for cstrtype in CONSTRAINTS:
- rql = 'INSERT CWConstraintType X: X name "%s"' % cstrtype
if verbose:
print rql
- cursor.execute(rql)
+ cursor.execute(rql, {'ct': unicode(cstrtype)})
if not verbose:
pb.update()
groupmap = group_mapping(cursor, interactive=False)
for ertype in aller:
# skip eid and has_text relations
- if ertype in ('eid', 'identity', 'has_text',):
+ if ertype in VIRTUAL_RTYPES:
pb.update()
continue
for rql, kwargs in erschema2rql(schema[ertype]):
@@ -549,12 +549,12 @@
relations, values = frdef_relations_values(rschema, objtype, props)
values.update({'se': subjtype, 'rt': str(rschema), 'oe': objtype})
yield 'SET %s WHERE %s, %s, X is CWAttribute' % (','.join(relations),
- _LOCATE_RDEF_RQL0,
- _LOCATE_RDEF_RQL1), values
+ _LOCATE_RDEF_RQL0,
+ _LOCATE_RDEF_RQL1), values
def updatenfrdef2rql(rschema, subjtype, objtype, props):
relations, values = nfrdef_relations_values(rschema, objtype, props)
values.update({'se': subjtype, 'rt': str(rschema), 'oe': objtype})
yield 'SET %s WHERE %s, %s, X is CWRelation' % (','.join(relations),
- _LOCATE_RDEF_RQL0,
- _LOCATE_RDEF_RQL1), values
+ _LOCATE_RDEF_RQL0,
+ _LOCATE_RDEF_RQL1), values
--- a/server/session.py Fri Jul 31 23:26:52 2009 +0200
+++ b/server/session.py Sat Aug 01 00:01:12 2009 +0200
@@ -40,8 +40,6 @@
description.append(term.get_type(solution, args))
return description
-from rql import stmts
-assert hasattr(stmts.Union, 'get_variable_variables'), "You need RQL > 0.18.3"
class Session(RequestSessionMixIn):
"""tie session id, user, connections pool and other session data all
@@ -59,6 +57,7 @@
self.timestamp = self.creation
self.is_internal_session = False
self.is_super_session = False
+ self.default_mode = 'read'
# short cut to querier .execute method
self._execute = repo.querier.execute
# shared data, used to communicate extra information between the client
@@ -74,6 +73,17 @@
def __str__(self):
return '<%ssession %s (%s 0x%x)>' % (self.cnxtype, self.user.login,
self.id, id(self))
+
+ def add_relation(self, fromeid, rtype, toeid):
+ if self.is_super_session:
+ self.repo.glob_add_relation(self, fromeid, rtype, toeid)
+ return
+ self.is_super_session = True
+ try:
+ self.repo.glob_add_relation(self, fromeid, rtype, toeid)
+ finally:
+ self.is_super_session = False
+
# resource accessors ######################################################
def actual_session(self):
@@ -110,13 +120,33 @@
# connection management ###################################################
+ def keep_pool_mode(self, mode):
+ """set pool_mode, e.g. how the session will keep its pool:
+
+        * if mode == 'write', the pool is freed after each read query, but kept
+ until the transaction's end (eg commit or rollback) when a write query
+ is detected (eg INSERT/SET/DELETE queries)
+
+ * if mode == 'transaction', the pool is only freed after the
+ transaction's end
+
+ notice that a repository has a limited set of pools, and a session has to
+ wait for a free pool to run any rql query (unless it already has a pool
+ set).
+ """
+ assert mode in ('transaction', 'write')
+ if mode == 'transaction':
+ self.default_mode = 'transaction'
+ else: # mode == 'write'
+ self.default_mode = 'read'
+
def get_mode(self):
- return getattr(self._threaddata, 'mode', 'read')
+ return getattr(self._threaddata, 'mode', self.default_mode)
def set_mode(self, value):
self._threaddata.mode = value
mode = property(get_mode, set_mode,
- doc='transaction mode (read/write), resetted to read on '
- 'commit / rollback')
+                    doc='transaction mode (read/write/transaction), reset to'
+ ' default_mode on commit / rollback')
def get_commit_state(self):
return getattr(self._threaddata, 'commit_state', None)
@@ -145,11 +175,11 @@
self._threads_in_transaction.add(threading.currentThread())
return self._threaddata.pool
- def reset_pool(self):
+ def reset_pool(self, ignoremode=False):
"""the session is no longer using its pool, at least for some time"""
# pool may be none if no operation has been done since last commit
# or rollback
- if self.pool is not None and self.mode == 'read':
+ if self.pool is not None and (ignoremode or self.mode == 'read'):
# even in read mode, we must release the current transaction
pool = self.pool
try:
@@ -165,7 +195,7 @@
"""update latest session usage timestamp and reset mode to read"""
self.timestamp = time()
self.local_perm_cache.clear()
- self._threaddata.mode = 'read'
+ self._threaddata.mode = self.default_mode
# shared data handling ###################################################
@@ -308,7 +338,7 @@
self.pending_operations[:] = []
self.transaction_data.clear()
if reset_pool:
- self.reset_pool()
+ self.reset_pool(ignoremode=True)
def rollback(self, reset_pool=True):
"""rollback the current session's transaction"""
@@ -333,7 +363,7 @@
self.pending_operations[:] = []
self.transaction_data.clear()
if reset_pool:
- self.reset_pool()
+ self.reset_pool(ignoremode=True)
def close(self):
"""do not close pool on session close, since they are shared now"""
--- a/server/sources/__init__.py Fri Jul 31 23:26:52 2009 +0200
+++ b/server/sources/__init__.py Sat Aug 01 00:01:12 2009 +0200
@@ -12,6 +12,7 @@
from logging import getLogger
from cubicweb import set_log_methods
+from cubicweb.schema import VIRTUAL_RTYPES
from cubicweb.server.sqlutils import SQL_PREFIX
@@ -201,7 +202,7 @@
# delete relations referencing one of those eids
eidcolum = SQL_PREFIX + 'eid'
for rschema in self.schema.relations():
- if rschema.is_final() or rschema.type == 'identity':
+ if rschema.is_final() or rschema.type in VIRTUAL_RTYPES:
continue
if rschema.inlined:
column = SQL_PREFIX + rschema.type
--- a/server/sources/extlite.py Fri Jul 31 23:26:52 2009 +0200
+++ b/server/sources/extlite.py Sat Aug 01 00:01:12 2009 +0200
@@ -170,10 +170,9 @@
has a connection set
"""
if cnx._cnx is not None:
- cnx._cnx.close()
# reset _cnx to ensure next thread using cnx will get a new
# connection
- cnx._cnx = None
+ cnx.close()
def syntax_tree_search(self, session, union,
args=None, cachekey=None, varmap=None, debug=0):
--- a/server/sources/native.py Fri Jul 31 23:26:52 2009 +0200
+++ b/server/sources/native.py Sat Aug 01 00:01:12 2009 +0200
@@ -44,7 +44,7 @@
"""Execute a query.
it's a function just so that it shows up in profiling
"""
- if server.DEBUG:
+ if server.DEBUG & server.DBG_SQL:
print 'exec', query, args
try:
self.cu.execute(str(query), args)
@@ -113,6 +113,12 @@
'help': 'database host',
'group': 'native-source', 'inputlevel': 1,
}),
+ ('db-port',
+ {'type' : 'string',
+ 'default': '',
+ 'help': 'database port',
+ 'group': 'native-source', 'inputlevel': 1,
+ }),
('db-name',
{'type' : 'string',
'default': REQUIRED,
@@ -166,8 +172,7 @@
self.check_connection = lambda cnx: cnx
def pool_reset(cnx):
if cnx._cnx is not None:
- cnx._cnx.close()
- cnx._cnx = None
+ cnx.close()
self.pool_reset = pool_reset
@property
@@ -183,8 +188,9 @@
def clear_eid_cache(self, eid, etype):
"""clear potential caches for the given eid"""
- self._cache.pop('%s X WHERE X eid %s' % (etype, eid), None)
+ self._cache.pop('Any X WHERE X eid %s, X is %s' % (eid, etype), None)
self._cache.pop('Any X WHERE X eid %s' % eid, None)
+ self._cache.pop('Any %s' % eid, None)
def sqlexec(self, session, sql, args=None):
"""execute the query and return its result"""
@@ -299,13 +305,15 @@
necessary to fetch the results (but not the results themselves)
may be cached using this key.
"""
- if server.DEBUG:
- print 'RQL FOR NATIVE SOURCE', self.uri, cachekey
+ if server.DEBUG & server.DBG_RQL:
+ print 'RQL FOR NATIVE SOURCE %s: %s' % (self.uri, union.as_string())
if varmap:
- print 'USING VARMAP', varmap
- print union.as_string()
- if args: print 'ARGS', args
- print 'SOLUTIONS', ','.join(str(s.solutions) for s in union.children)
+ print 'using varmap', varmap
+ if args:
+ print 'args', args
+ if server.DEBUG & server.DBG_MORE:
+ print 'cache key', cachekey
+ print 'solutions', ','.join(str(s.solutions) for s in union.children)
# remember number of actually selected term (sql generation may append some)
if cachekey is None:
self.no_cache += 1
@@ -331,7 +339,7 @@
session.pool.reconnect(self)
cursor = self.doexec(session, sql, args)
res = self.process_result(cursor)
- if server.DEBUG:
+ if server.DEBUG & (server.DBG_SQL | server.DBG_RQL):
print '------>', res
return res
@@ -342,12 +350,12 @@
inserts all data by calling .executemany().
"""
if self.uri == 'system':
- if server.DEBUG:
- print 'FLYING RQL FOR SOURCE', self.uri
+ if server.DEBUG & server.DBG_RQL:
+                print 'FLYING RQL FOR SOURCE %s: %s' % (self.uri, union.as_string())
if varmap:
print 'USING VARMAP', varmap
- print union.as_string()
- print 'SOLUTIONS', ','.join(str(s.solutions) for s in union.children)
+ if server.DEBUG & server.DBG_MORE:
+ print 'SOLUTIONS', ','.join(str(s.solutions) for s in union.children)
# generate sql queries if we are able to do so
sql, query_args = self._rql_sqlgen.generate(union, args, varmap)
query = 'INSERT INTO %s %s' % (table, sql.encode(self.encoding))
@@ -428,9 +436,9 @@
"""Execute a query.
it's a function just so that it shows up in profiling
"""
- if server.DEBUG:
- print 'exec', query, args
cursor = session.pool[self.uri]
+ if server.DEBUG & server.DBG_SQL:
+ print 'exec', query, args, session.pool.connection(self.uri)._cnx
try:
# str(query) to avoid error if it's an unicode string
cursor.execute(str(query), args)
@@ -449,7 +457,7 @@
"""Execute a query.
it's a function just so that it shows up in profiling
"""
- if server.DEBUG:
+ if server.DEBUG & server.DBG_SQL:
print 'execmany', query, 'with', len(args), 'arguments'
cursor = session.pool[self.uri]
try:
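
The new db-port option surfaces in the instance's sources file next to the other db-*
options defined above. A hedged sketch of what a [system] section might look like (the
section name matches the old-style files removed below; the db-driver/adapter keys and
all values are illustrative assumptions, not taken from this patch):

    [system]
    adapter=native
    db-driver=postgres
    db-host=dbserver.example.com
    db-port=5433
    db-name=mydb
    db-user=cubicweb
    db-password=secret
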
--- a/server/sources/pyrorql.py Fri Jul 31 23:26:52 2009 +0200
+++ b/server/sources/pyrorql.py Sat Aug 01 00:01:12 2009 +0200
@@ -270,11 +270,12 @@
"""
if not args is None:
args = args.copy()
- if server.DEBUG:
- print 'RQL FOR PYRO SOURCE', self.uri
- print union.as_string()
- if args: print 'ARGS', args
- print 'SOLUTIONS', ','.join(str(s.solutions) for s in union.children)
+ if server.DEBUG & server.DBG_RQL:
+            print 'RQL FOR PYRO SOURCE %s: %s' % (self.uri, union.as_string())
+ if args:
+ print 'ARGS', args
+ if server.DEBUG & server.DBG_MORE:
+ print 'SOLUTIONS', ','.join(str(s.solutions) for s in union.children)
# get cached cursor anyway
cu = session.pool[self.uri]
if cu is None:
@@ -288,7 +289,7 @@
if server.DEBUG:
print 'unknown eid', ex, 'no results'
return []
- if server.DEBUG:
+ if server.DEBUG & server.DBG_RQL:
print 'TRANSLATED RQL', rql
try:
rset = cu.execute(rql, args, cachekey)
@@ -325,11 +326,11 @@
results = rows
else:
results = []
- if server.DEBUG:
+ if server.DEBUG & server.DBG_RQL:
if len(results)>10:
- print '--------------->', results[:10], '...', len(results)
+ print '-->', results[:10], '...', len(results)
else:
- print '--------------->', results
+ print '-->', results
return results
def _entity_relations_and_kwargs(self, session, entity):
--- a/server/sqlutils.py Fri Jul 31 23:26:52 2009 +0200
+++ b/server/sqlutils.py Sat Aug 01 00:01:12 2009 +0200
@@ -24,6 +24,7 @@
from cubicweb.utils import todate, todatetime
from cubicweb.common.uilib import remove_html_tags
from cubicweb.toolsutils import restrict_perms_to_user
+from cubicweb.schema import PURE_VIRTUAL_RTYPES
from cubicweb.server import SQL_CONNECT_HOOKS
from cubicweb.server.utils import crypt_password
@@ -77,7 +78,7 @@
def sqlschema(schema, driver, text_index=True,
user=None, set_owner=False,
- skip_relations=('has_text', 'identity'), skip_entities=()):
+ skip_relations=PURE_VIRTUAL_RTYPES, skip_entities=()):
"""return the system sql schema, according to the given parameters"""
from yams.schema2sql import schema2sql
from cubicweb.server.sources import native
@@ -102,7 +103,7 @@
def sqldropschema(schema, driver, text_index=True,
- skip_relations=('has_text', 'identity'), skip_entities=()):
+ skip_relations=PURE_VIRTUAL_RTYPES, skip_entities=()):
"""return the sql to drop the schema, according to the given parameters"""
from yams.schema2sql import dropschema2sql
from cubicweb.server.sources import native
--- a/server/ssplanner.py Fri Jul 31 23:26:52 2009 +0200
+++ b/server/ssplanner.py Sat Aug 01 00:01:12 2009 +0200
@@ -13,6 +13,7 @@
from rql.nodes import Constant
from cubicweb import QueryError, typed_eid
+from cubicweb.schema import VIRTUAL_RTYPES
def add_types_restriction(schema, rqlst, newroot=None, solutions=None):
if newroot is None:
@@ -196,7 +197,7 @@
relations, attrrelations = [], []
getrschema = self.schema.rschema
for relation in rqlst.main_relations:
- if relation.r_type in ('eid', 'has_text', 'identity'):
+ if relation.r_type in VIRTUAL_RTYPES:
raise QueryError('can not assign to %r relation'
% relation.r_type)
lhs, rhs = relation.get_variable_parts()
--- a/server/test/data/config1/application_hooks.py Fri Jul 31 23:26:52 2009 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,8 +0,0 @@
-"""hooks for config1
-
- Copyright (c) 2003-2007 LOGILAB S.A. (Paris, FRANCE).
- http://www.logilab.fr/ -- mailto:contact@logilab.fr
-:license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
-"""
-
-HOOKS = {"after_add_relation" : {"concerned_by" : [lambda: None]}}
--- a/server/test/data/config1/bootstrap_packages Fri Jul 31 23:26:52 2009 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,1 +0,0 @@
-# file generated by cubicweb-ctl
--- a/server/test/data/config1/server-ctl.conf Fri Jul 31 23:26:52 2009 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,13 +0,0 @@
-# file generated by cubicweb-ctl
-
-APPLICATION HOME=/home/adim/etc/cubicweb.d/crmadim
-DEBUG=
-HOST=
-LOG TRESHOLD=LOG_DEBUG
-NS GROUP=cubicweb
-NS HOST=
-PID FILE=/home/adim/tmp/crmadim.pid
-PORT=
-QUERY LOG FILE=
-UID=1006
-PROFILE=
--- a/server/test/data/config1/sources Fri Jul 31 23:26:52 2009 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,10 +0,0 @@
-# file generated by cubicweb-ctl
-
-[system]
-ADAPTER=native
-DBHOST=crater
-DBDRIVER=postgres
-DBNAME=whatever
-ENCODING=UTF-8
-SPLIT_RELATIONS = True
-
--- a/server/test/data/config2/application_hooks.py Fri Jul 31 23:26:52 2009 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,8 +0,0 @@
-"""hooks for config2
-
- Copyright (c) 2003-2007 LOGILAB S.A. (Paris, FRANCE).
- http://www.logilab.fr/ -- mailto:contact@logilab.fr
-:license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
-"""
-
-HOOKS = {"after_delete_relation" : {"todo_by" : [lambda: 1]}}
--- a/server/test/data/config2/bootstrap_packages Fri Jul 31 23:26:52 2009 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,1 +0,0 @@
-# file generated by cubicweb-ctl
--- a/server/test/data/config2/server-ctl.conf Fri Jul 31 23:26:52 2009 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,13 +0,0 @@
-# file generated by cubicweb-ctl
-
-APPLICATION HOME=/home/adim/etc/cubicweb.d/crmadim
-DEBUG=
-HOST=
-LOG TRESHOLD=LOG_DEBUG
-NS GROUP=cubicweb
-NS HOST=
-PID FILE=/home/adim/tmp/crmadim.pid
-PORT=
-QUERY LOG FILE=
-UID=1006
-PROFILE=
--- a/server/test/data/config2/sources Fri Jul 31 23:26:52 2009 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,10 +0,0 @@
-# file generated by cubicweb-ctl
-
-[system]
-ADAPTER=native
-DBHOST=crater
-DBDRIVER=postgres
-DBNAME=whatever
-ENCODING=UTF-8
-SPLIT_RELATIONS = True
-
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/server/test/data/migratedapp/bootstrap_cubes Sat Aug 01 00:01:12 2009 +0200
@@ -0,0 +1,1 @@
+card,comment,folder,tag,basket,email,file
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/server/test/data/migratedapp/schema.py Sat Aug 01 00:01:12 2009 +0200
@@ -0,0 +1,124 @@
+"""
+
+:organization: Logilab
+:copyright: 2001-2009 LOGILAB S.A. (Paris, FRANCE), license is LGPL v2.
+:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
+:license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
+"""
+from yams.buildobjs import (EntityType, RelationType, RelationDefinition,
+ SubjectRelation, ObjectRelation,
+ RichString, String, Int, Boolean, Datetime, Date)
+from yams.constraints import SizeConstraint, UniqueConstraint
+from cubicweb.schema import (WorkflowableEntityType, RQLConstraint,
+ ERQLExpression, RRQLExpression)
+
+class Affaire(EntityType):
+ permissions = {
+ 'read': ('managers', 'users', 'guests'),
+ 'add': ('managers', ERQLExpression('X concerne S, S owned_by U')),
+ 'update': ('managers', 'owners', ERQLExpression('X concerne S, S owned_by U')),
+ 'delete': ('managers', 'owners', ERQLExpression('X concerne S, S owned_by U')),
+ }
+
+ ref = String(fulltextindexed=True, indexed=True,
+ constraints=[SizeConstraint(16)])
+ sujet = String(fulltextindexed=True,
+ constraints=[SizeConstraint(256)])
+ concerne = SubjectRelation('Societe')
+
+class concerne(RelationType):
+ permissions = {
+ 'read': ('managers', 'users', 'guests'),
+ 'add': ('managers', RRQLExpression('U has_update_permission S')),
+ 'delete': ('managers', RRQLExpression('O owned_by U')),
+ }
+
+class Note(EntityType):
+ permissions = {'read': ('managers', 'users', 'guests',),
+ 'update': ('managers', 'owners',),
+ 'delete': ('managers', ),
+ 'add': ('managers',
+ ERQLExpression('X ecrit_part PE, U in_group G, '
+ 'PE require_permission P, P name "add_note", '
+ 'P require_group G'),)}
+
+ date = Datetime()
+ type = String(maxsize=1)
+ whatever = Int()
+ mydate = Date(default='TODAY')
+ para = String(maxsize=512)
+ shortpara = String(maxsize=64)
+ ecrit_par = SubjectRelation('Personne', constraints=[RQLConstraint('S concerne A, O concerne A')])
+ attachment = SubjectRelation(('File', 'Image'))
+
+class ecrit_par(RelationType):
+ permissions = {'read': ('managers', 'users', 'guests',),
+ 'delete': ('managers', ),
+ 'add': ('managers',
+ RRQLExpression('O require_permission P, P name "add_note", '
+ 'U in_group G, P require_group G'),)
+ }
+ inlined = True
+ cardinality = '?*'
+
+class Folder2(EntityType):
+ """folders are used to classify entities. They may be defined as a tree.
+ When you include the Folder entity, all application specific entities
+ may then be classified using the "filed_under" relation.
+ """
+ name = String(required=True, indexed=True, internationalizable=True,
+ constraints=[UniqueConstraint(), SizeConstraint(64)])
+ description = RichString(fulltextindexed=True)
+
+ filed_under2 = ObjectRelation('*')
+
+
+class filed_under2(RelationType):
+ """indicates that an entity is classified under a folder"""
+ # is_about has been renamed into filed_under
+ #//* is_about Folder
+ #* filed_under Folder
+
+class Personne(EntityType):
+ nom = String(fulltextindexed=True, required=True, maxsize=64)
+ prenom = String(fulltextindexed=True, maxsize=64)
+ sexe = String(maxsize=1, default='M')
+ promo = String(vocabulary=('bon','pasbon'))
+ titre = String(fulltextindexed=True, maxsize=128)
+ adel = String(maxsize=128)
+ ass = String(maxsize=128)
+ web = String(maxsize=128)
+ tel = Int()
+ fax = Int()
+ datenaiss = Datetime()
+ test = Boolean()
+
+ travaille = SubjectRelation('Societe')
+ concerne = SubjectRelation('Affaire')
+ concerne2 = SubjectRelation('Affaire')
+ connait = SubjectRelation('Personne', symetric=True)
+
+class Societe(EntityType):
+ permissions = {
+ 'read': ('managers', 'users', 'guests'),
+ 'update': ('managers', 'owners'),
+ 'delete': ('managers', 'owners'),
+ 'add': ('managers', 'users',)
+ }
+
+ nom = String(maxsize=64, fulltextindexed=True)
+ web = String(maxsize=128)
+ tel = Int()
+ fax = Int()
+ rncs = String(maxsize=128)
+ ad1 = String(maxsize=128)
+ ad2 = String(maxsize=128)
+ ad3 = String(maxsize=128)
+ cp = String(maxsize=12)
+    ville = String(maxsize=32)
+
+ in_state = SubjectRelation('State', cardinality='?*')
+
+class evaluee(RelationDefinition):
+ subject = ('Personne', 'CWUser', 'Societe')
+ object = ('Note')
--- a/server/test/data/migrschema/Affaire.py Fri Jul 31 23:26:52 2009 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,28 +0,0 @@
-"""
-
-:organization: Logilab
-:copyright: 2001-2009 LOGILAB S.A. (Paris, FRANCE), license is LGPL v2.
-:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
-:license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
-"""
-
-class Affaire(EntityType):
- permissions = {
- 'read': ('managers', 'users', 'guests'),
- 'add': ('managers', ERQLExpression('X concerne S, S owned_by U')),
- 'update': ('managers', 'owners', ERQLExpression('X concerne S, S owned_by U')),
- 'delete': ('managers', 'owners', ERQLExpression('X concerne S, S owned_by U')),
- }
-
- ref = String(fulltextindexed=True, indexed=True,
- constraints=[SizeConstraint(16)])
- sujet = String(fulltextindexed=True,
- constraints=[SizeConstraint(256)])
-
-class concerne(RelationType):
- permissions = {
- 'read': ('managers', 'users', 'guests'),
- 'add': ('managers', RRQLExpression('U has_update_permission S')),
- 'delete': ('managers', RRQLExpression('O owned_by U')),
- }
-
--- a/server/test/data/migrschema/Folder2.py Fri Jul 31 23:26:52 2009 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,29 +0,0 @@
-"""
-
-:organization: Logilab
-:copyright: 2001-2009 LOGILAB S.A. (Paris, FRANCE), license is LGPL v2.
-:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
-:license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
-"""
-
-class Folder2(MetaUserEntityType):
- """folders are used to classify entities. They may be defined as a tree.
- When you include the Folder entity, all application specific entities
- may then be classified using the "filed_under" relation.
- """
- name = String(required=True, indexed=True, internationalizable=True,
- constraints=[UniqueConstraint(), SizeConstraint(64)])
- description = RichString(fulltextindexed=True)
-
- filed_under2 = BothWayRelation(
- SubjectRelation('Folder2', description=_("parent folder")),
- ObjectRelation('*'),
- )
-
-
-class filed_under2(MetaUserRelationType):
- """indicates that an entity is classified under a folder"""
- # is_about has been renamed into filed_under
- #//* is_about Folder
- #* filed_under Folder
-
--- a/server/test/data/migrschema/Note.py Fri Jul 31 23:26:52 2009 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,33 +0,0 @@
-"""
-
-:organization: Logilab
-:copyright: 2001-2009 LOGILAB S.A. (Paris, FRANCE), license is LGPL v2.
-:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
-:license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
-"""
-class Note(EntityType):
-
- permissions = {'read': ('managers', 'users', 'guests',),
- 'update': ('managers', 'owners',),
- 'delete': ('managers', ),
- 'add': ('managers',
- ERQLExpression('X ecrit_part PE, U in_group G, '
- 'PE require_permission P, P name "add_note", '
- 'P require_group G'),)}
-
- date = Datetime()
- type = String(maxsize=1)
- whatever = Int()
- mydate = Date(default='TODAY')
- para = String(maxsize=512)
- shortpara = String(maxsize=64)
- ecrit_par = SubjectRelation('Personne', constraints=[RQLConstraint('S concerne A, O concerne A')])
-
-class ecrit_par(RelationType):
- permissions = {'read': ('managers', 'users', 'guests',),
- 'delete': ('managers', ),
- 'add': ('managers',
- RRQLExpression('O require_permission P, P name "add_note", '
- 'U in_group G, P require_group G'),)
- }
- inlined = True
--- a/server/test/data/migrschema/Personne.sql Fri Jul 31 23:26:52 2009 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,12 +0,0 @@
-nom ivarchar(64) NOT NULL
-prenom ivarchar(64)
-civility char(1) DEFAULT 'M'
-promo choice('bon','pasbon')
-titre ivarchar(128)
-adel varchar(128)
-ass varchar(128)
-web varchar(128)
-tel integer
-fax integer
-datenaiss datetime
-test boolean
--- a/server/test/data/migrschema/Societe.perms Fri Jul 31 23:26:52 2009 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,1 +0,0 @@
-Read: managers, users, guests
--- a/server/test/data/migrschema/Societe.sql Fri Jul 31 23:26:52 2009 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,10 +0,0 @@
-nom ivarchar(64)
-web varchar(128)
-tel integer
-fax integer
-rncs varchar(32)
-ad1 varchar(128)
-ad2 varchar(128)
-ad3 varchar(128)
-cp varchar(12)
-ville varchar(32)
--- a/server/test/data/migrschema/relations.rel Fri Jul 31 23:26:52 2009 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,15 +0,0 @@
-Personne travaille Societe
-Personne evaluee Note
-CWUser evaluee Note
-Societe evaluee Note
-Personne concerne Affaire
-Affaire concerne Societe
-Personne concerne2 Affaire
-
-Personne connait Personne symetric
-
-Societe in_state State inline
-
-Note attachment File
-Note attachment Image
-
--- a/server/test/data/schema.py Fri Jul 31 23:26:52 2009 +0200
+++ b/server/test/data/schema.py Sat Aug 01 00:01:12 2009 +0200
@@ -76,7 +76,7 @@
migrated_from = SubjectRelation('Note')
attachment = SubjectRelation(('File', 'Image'))
- inline1 = SubjectRelation('Affaire', inlined=True)
+ inline1 = SubjectRelation('Affaire', inlined=True, cardinality='?*')
todo_by = SubjectRelation('CWUser')
class Personne(EntityType):
@@ -98,7 +98,7 @@
travaille = SubjectRelation('Societe')
concerne = SubjectRelation('Affaire')
connait = SubjectRelation('Personne')
- inline2 = SubjectRelation('Affaire', inlined=True)
+ inline2 = SubjectRelation('Affaire', inlined=True, cardinality='?*')
comments = ObjectRelation('Comment')
@@ -167,7 +167,12 @@
object = 'Note'
-class see_also(RelationDefinition):
+class see_also_1(RelationDefinition):
+ name = 'see_also'
+ subject = object = 'Folder'
+
+class see_also_2(RelationDefinition):
+ name = 'see_also'
subject = ('Bookmark', 'Note')
object = ('Bookmark', 'Note')
@@ -180,14 +185,13 @@
subject = 'Note'
object ='Personne'
constraints = [RQLConstraint('E concerns P, X version_of P')]
+ cardinality = '?*'
class ecrit_par_2(RelationDefinition):
name = 'ecrit_par'
subject = 'Note'
object ='CWUser'
-
-class see_also(RelationDefinition):
- subject = object = 'Folder'
+    cardinality = '?*'
class copain(RelationDefinition):
@@ -202,7 +206,7 @@
object = 'Folder'
class require_permission(RelationDefinition):
- subject = ('Card', 'Note')
+ subject = ('Card', 'Note', 'Personne')
object = 'CWPermission'
class require_state(RelationDefinition):
--- a/server/test/unittest_config.py Fri Jul 31 23:26:52 2009 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,33 +0,0 @@
-"""tests for server config
-
-:organization: Logilab
-:copyright: 2001-2009 LOGILAB S.A. (Paris, FRANCE), license is LGPL v2.
-:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
-:license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
-"""
-
-from os.path import join, dirname
-
-from logilab.common.testlib import TestCase, unittest_main
-
-from cubicweb.devtools import TestServerConfiguration
-
-class ConfigTC(TestCase):
-
- def test_load_hooks_twice(self):
- class vreg:
- @staticmethod
- def registry_objects(registry):
- return []
-
- cfg1 = TestServerConfiguration('data/config1')
- cfg1.bootstrap_cubes()
- cfg2 = TestServerConfiguration('data/config2')
- cfg2.bootstrap_cubes()
- self.failIf(cfg1.load_hooks(vreg) is cfg2.load_hooks(vreg))
- self.failUnless('after_add_relation' in cfg1.load_hooks(vreg))
- self.failUnless('after_delete_relation' in cfg2.load_hooks(vreg))
-
-
-if __name__ == '__main__':
- unittest_main()
--- a/server/test/unittest_hooks.py Fri Jul 31 23:26:52 2009 +0200
+++ b/server/test/unittest_hooks.py Sat Aug 01 00:01:12 2009 +0200
@@ -62,12 +62,12 @@
def test_delete_if_singlecard1(self):
self.assertEquals(self.repo.schema['in_state'].inlined, False)
- ueid, = self.execute('INSERT CWUser X: X login "toto", X upassword "hop", X in_group Y, X in_state S '
- 'WHERE Y name "users", S name "activated"')[0]
+ ueid = self.create_user('toto')
self.commit()
self.execute('SET X in_state S WHERE S name "deactivated", X eid %(x)s', {'x': ueid})
rset = self.execute('Any S WHERE X in_state S, X eid %(x)s', {'x': ueid})
self.assertEquals(len(rset), 1)
+ self.commit()
self.assertRaises(Exception, self.execute, 'SET X in_state S WHERE S name "deactivated", X eid %s' % ueid)
rset2 = self.execute('Any S WHERE X in_state S, X eid %(x)s', {'x': ueid})
self.assertEquals(rset.rows, rset2.rows)
@@ -246,13 +246,12 @@
class SchemaModificationHooksTC(RepositoryBasedTC):
- #copy_schema = True
def setUp(self):
if not hasattr(self, '_repo'):
# first initialization
repo = self.repo # set by the RepositoryBasedTC metaclass
- # force to read schema from the database
+ # force reading the schema from the database so that schema instances get their eids properly set
repo.config._cubes = None
repo.fill_schema()
RepositoryBasedTC.setUp(self)
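The first hunk above swaps a hand-written INSERT RQL for the test environment's create_user() helper, which hides the boilerplate of giving the new CWUser a password, a group and an initial state. A short usage sketch (test name hypothetical, helper behaviour as suggested by its use above):

def test_user_state_change(self):
    # create_user() sets up a ready-to-use account and returns a value
    # usable as the user's eid in subsequent RQL queries
    ueid = self.create_user('toto')
    self.commit()
    self.execute('SET X in_state S WHERE S name "deactivated", X eid %(x)s',
                 {'x': ueid})
    self.commit()
    rset = self.execute('Any S WHERE X in_state S, X eid %(x)s', {'x': ueid})
    self.assertEquals(len(rset), 1)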
--- a/server/test/unittest_migractions.py Fri Jul 31 23:26:52 2009 +0200
+++ b/server/test/unittest_migractions.py Sat Aug 01 00:01:12 2009 +0200
@@ -3,11 +3,12 @@
"""
from datetime import date
+from os.path import join
from logilab.common.testlib import TestCase, unittest_main
-from cubicweb.devtools.apptest import RepositoryBasedTC, get_versions
from cubicweb import ConfigurationError
+from cubicweb.devtools.apptest import RepositoryBasedTC, get_versions
from cubicweb.schema import CubicWebSchemaLoader
from cubicweb.server.sqlutils import SQL_PREFIX
from cubicweb.server.repository import Repository
@@ -23,7 +24,6 @@
class MigrationCommandsTC(RepositoryBasedTC):
- copy_schema = False
def setUp(self):
if not hasattr(self, '_repo'):
@@ -33,10 +33,10 @@
repo.config._cubes = None
repo.fill_schema()
# hack to read the schema from data/migrschema
- CubicWebSchemaLoader.main_schema_directory = 'migrschema'
+ self.repo.config.appid = join('data', 'migratedapp')
global migrschema
migrschema = self.repo.config.load_schema()
- del CubicWebSchemaLoader.main_schema_directory
+ self.repo.config.appid = 'data'
assert 'Folder' in migrschema
self.repo.hm.deactivate_verification_hooks()
RepositoryBasedTC.setUp(self)
@@ -357,7 +357,7 @@
def test_add_remove_cube_and_deps(self):
cubes = set(self.config.cubes())
schema = self.repo.schema
- self.assertEquals(sorted(schema['see_also']._rproperties.keys()),
+ self.assertEquals(sorted((str(s), str(o)) for s, o in schema['see_also']._rproperties.keys()),
sorted([('EmailThread', 'EmailThread'), ('Folder', 'Folder'),
('Bookmark', 'Bookmark'), ('Bookmark', 'Note'),
('Note', 'Note'), ('Note', 'Bookmark')]))
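The migration test now loads the target schema by pointing the configuration at a sibling application directory instead of monkey-patching the schema loader class. A sketch of that pattern (directory names hypothetical); wrapping the restore in try/finally keeps the configuration sane even if loading fails:

from os.path import join

def load_migration_schema(config, target='migratedapp'):
    orig_appid = config.appid
    config.appid = join('data', target)   # read the schema from data/<target>
    try:
        return config.load_schema()
    finally:
        config.appid = orig_appid         # always restore the original appid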
--- a/server/test/unittest_msplanner.py Fri Jul 31 23:26:52 2009 +0200
+++ b/server/test/unittest_msplanner.py Sat Aug 01 00:01:12 2009 +0200
@@ -50,6 +50,7 @@
{'X': 'CWRelation'}, {'X': 'CWPermission'}, {'X': 'CWProperty'},
{'X': 'CWRType'}, {'X': 'CWUser'}, {'X': 'Email'},
{'X': 'EmailAddress'}, {'X': 'EmailPart'}, {'X': 'EmailThread'},
+ {'X': 'ExternalUri'},
{'X': 'File'}, {'X': 'Folder'}, {'X': 'Image'},
{'X': 'Note'}, {'X': 'Personne'}, {'X': 'RQLExpression'},
{'X': 'Societe'}, {'X': 'State'}, {'X': 'SubDivision'},
@@ -873,13 +874,13 @@
[{'X': 'Card'}, {'X': 'Note'}, {'X': 'State'}])],
[self.cards, self.system], {}, {'X': 'table0.C0'}, []),
('FetchStep',
- [('Any X WHERE X is IN(Bookmark, CWAttribute, CWCache, CWConstraint, CWConstraintType, CWEType, CWGroup, CWPermission, CWProperty, CWRType, CWRelation, Comment, Division, Email, EmailAddress, EmailPart, EmailThread, File, Folder, Image, Personne, RQLExpression, Societe, SubDivision, Tag, TrInfo, Transition)',
+ [('Any X WHERE X is IN(Bookmark, CWAttribute, CWCache, CWConstraint, CWConstraintType, CWEType, CWGroup, CWPermission, CWProperty, CWRType, CWRelation, Comment, Division, Email, EmailAddress, EmailPart, EmailThread, ExternalUri, File, Folder, Image, Personne, RQLExpression, Societe, SubDivision, Tag, TrInfo, Transition)',
sorted([{'X': 'Bookmark'}, {'X': 'Comment'}, {'X': 'Division'},
{'X': 'CWCache'}, {'X': 'CWConstraint'}, {'X': 'CWConstraintType'},
{'X': 'CWEType'}, {'X': 'CWAttribute'}, {'X': 'CWGroup'},
{'X': 'CWRelation'}, {'X': 'CWPermission'}, {'X': 'CWProperty'},
{'X': 'CWRType'}, {'X': 'Email'}, {'X': 'EmailAddress'},
- {'X': 'EmailPart'}, {'X': 'EmailThread'}, {'X': 'File'},
+ {'X': 'EmailPart'}, {'X': 'EmailThread'}, {'X': 'ExternalUri'}, {'X': 'File'},
{'X': 'Folder'}, {'X': 'Image'}, {'X': 'Personne'},
{'X': 'RQLExpression'}, {'X': 'Societe'}, {'X': 'SubDivision'},
{'X': 'Tag'}, {'X': 'TrInfo'}, {'X': 'Transition'}]))],
@@ -922,7 +923,7 @@
[self.system], {'X': 'table3.C0'}, {'ET': 'table0.C0', 'X': 'table0.C1'}, []),
# extra UnionFetchStep could be avoided but has no cost, so don't care
('UnionFetchStep',
- [('FetchStep', [('Any ET,X WHERE X is ET, ET is CWEType, X is IN(Bookmark, CWAttribute, CWCache, CWConstraint, CWConstraintType, CWEType, CWGroup, CWPermission, CWProperty, CWRType, CWRelation, Comment, Division, Email, EmailAddress, EmailPart, EmailThread, File, Folder, Image, Personne, RQLExpression, Societe, SubDivision, Tag, TrInfo, Transition)',
+ [('FetchStep', [('Any ET,X WHERE X is ET, ET is CWEType, X is IN(Bookmark, CWAttribute, CWCache, CWConstraint, CWConstraintType, CWEType, CWGroup, CWPermission, CWProperty, CWRType, CWRelation, Comment, Division, Email, EmailAddress, EmailPart, EmailThread, ExternalUri, File, Folder, Image, Personne, RQLExpression, Societe, SubDivision, Tag, TrInfo, Transition)',
[{'X': 'Bookmark', 'ET': 'CWEType'}, {'X': 'Comment', 'ET': 'CWEType'},
{'X': 'Division', 'ET': 'CWEType'}, {'X': 'CWCache', 'ET': 'CWEType'},
{'X': 'CWConstraint', 'ET': 'CWEType'}, {'X': 'CWConstraintType', 'ET': 'CWEType'},
@@ -931,7 +932,9 @@
{'X': 'CWPermission', 'ET': 'CWEType'}, {'X': 'CWProperty', 'ET': 'CWEType'},
{'X': 'CWRType', 'ET': 'CWEType'}, {'X': 'Email', 'ET': 'CWEType'},
{'X': 'EmailAddress', 'ET': 'CWEType'}, {'X': 'EmailPart', 'ET': 'CWEType'},
- {'X': 'EmailThread', 'ET': 'CWEType'}, {'X': 'File', 'ET': 'CWEType'},
+ {'X': 'EmailThread', 'ET': 'CWEType'},
+ {'ET': 'CWEType', 'X': 'ExternalUri'},
+ {'X': 'File', 'ET': 'CWEType'},
{'X': 'Folder', 'ET': 'CWEType'}, {'X': 'Image', 'ET': 'CWEType'},
{'X': 'Personne', 'ET': 'CWEType'}, {'X': 'RQLExpression', 'ET': 'CWEType'},
{'X': 'Societe', 'ET': 'CWEType'}, {'X': 'SubDivision', 'ET': 'CWEType'},
@@ -958,7 +961,9 @@
{'ET': 'CWEType', 'X': 'CWProperty'}, {'ET': 'CWEType', 'X': 'CWRType'},
{'ET': 'CWEType', 'X': 'CWUser'}, {'ET': 'CWEType', 'X': 'Email'},
{'ET': 'CWEType', 'X': 'EmailAddress'}, {'ET': 'CWEType', 'X': 'EmailPart'},
- {'ET': 'CWEType', 'X': 'EmailThread'}, {'ET': 'CWEType', 'X': 'File'},
+ {'ET': 'CWEType', 'X': 'EmailThread'},
+ {'ET': 'CWEType', 'X': 'ExternalUri'},
+ {'ET': 'CWEType', 'X': 'File'},
{'ET': 'CWEType', 'X': 'Folder'}, {'ET': 'CWEType', 'X': 'Image'},
{'ET': 'CWEType', 'X': 'Note'}, {'ET': 'CWEType', 'X': 'Personne'},
{'ET': 'CWEType', 'X': 'RQLExpression'}, {'ET': 'CWEType', 'X': 'Societe'},
--- a/server/test/unittest_querier.py Fri Jul 31 23:26:52 2009 +0200
+++ b/server/test/unittest_querier.py Sat Aug 01 00:01:12 2009 +0200
@@ -110,7 +110,7 @@
'ET': 'CWEType', 'ETN': 'String'}])
rql, solutions = partrqls[1]
self.assertEquals(rql, 'Any ETN,X WHERE X is ET, ET name ETN, ET is CWEType, '
- 'X is IN(Bookmark, CWAttribute, CWCache, CWConstraint, CWConstraintType, CWEType, CWGroup, CWPermission, CWProperty, CWRType, CWRelation, CWUser, Card, Comment, Division, Email, EmailAddress, EmailPart, EmailThread, File, Folder, Image, Note, Personne, RQLExpression, Societe, State, SubDivision, Tag, TrInfo, Transition)')
+ 'X is IN(Bookmark, CWAttribute, CWCache, CWConstraint, CWConstraintType, CWEType, CWGroup, CWPermission, CWProperty, CWRType, CWRelation, CWUser, Card, Comment, Division, Email, EmailAddress, EmailPart, EmailThread, ExternalUri, File, Folder, Image, Note, Personne, RQLExpression, Societe, State, SubDivision, Tag, TrInfo, Transition)')
self.assertListEquals(sorted(solutions),
sorted([{'X': 'Bookmark', 'ETN': 'String', 'ET': 'CWEType'},
{'X': 'Card', 'ETN': 'String', 'ET': 'CWEType'},
@@ -131,6 +131,7 @@
{'X': 'CWProperty', 'ETN': 'String', 'ET': 'CWEType'},
{'X': 'CWRType', 'ETN': 'String', 'ET': 'CWEType'},
{'X': 'CWUser', 'ETN': 'String', 'ET': 'CWEType'},
+ {'X': 'ExternalUri', 'ETN': 'String', 'ET': 'CWEType'},
{'X': 'File', 'ETN': 'String', 'ET': 'CWEType'},
{'X': 'Folder', 'ETN': 'String', 'ET': 'CWEType'},
{'X': 'Image', 'ETN': 'String', 'ET': 'CWEType'},
@@ -226,10 +227,10 @@
def test_select_2(self):
rset = self.execute('Any X ORDERBY N WHERE X is CWGroup, X name N')
- self.assertEquals(tuplify(rset.rows), [(3,), (1,), (4,), (2,)])
+ self.assertEquals(tuplify(rset.rows), [(1,), (2,), (3,), (4,)])
self.assertEquals(rset.description, [('CWGroup',), ('CWGroup',), ('CWGroup',), ('CWGroup',)])
rset = self.execute('Any X ORDERBY N DESC WHERE X is CWGroup, X name N')
- self.assertEquals(tuplify(rset.rows), [(2,), (4,), (1,), (3,)])
+ self.assertEquals(tuplify(rset.rows), [(4,), (3,), (2,), (1,)])
def test_select_3(self):
rset = self.execute('Any N GROUPBY N WHERE X is CWGroup, X name N')
@@ -272,7 +273,7 @@
def test_select_5(self):
rset = self.execute('Any X, TMP ORDERBY TMP WHERE X name TMP, X is CWGroup')
- self.assertEquals(tuplify(rset.rows), [(3, 'guests',), (1, 'managers',), (4, 'owners',), (2, 'users',)])
+ self.assertEquals(tuplify(rset.rows), [(1, 'guests',), (2, 'managers',), (3, 'owners',), (4, 'users',)])
self.assertEquals(rset.description, [('CWGroup', 'String',), ('CWGroup', 'String',), ('CWGroup', 'String',), ('CWGroup', 'String',)])
def test_select_6(self):
@@ -344,7 +345,8 @@
peid1 = self.execute("INSERT Personne X: X nom 'bidule'")[0][0]
rset = self.execute('Any X WHERE X eid %(x)s, P? connait X', {'x':peid1}, 'x')
self.assertEquals(rset.rows, [[peid1]])
- rset = self.execute('Any X WHERE X eid %(x)s, X require_permission P?', {'x':peid1}, 'x')
+ rset = self.execute('Any X WHERE X eid %(x)s, X require_permission P?',
+ {'x':peid1}, 'x')
self.assertEquals(rset.rows, [[peid1]])
def test_select_left_outer_join(self):
@@ -464,10 +466,12 @@
'WHERE RT name N, RDEF relation_type RT '
'HAVING COUNT(RDEF) > 10')
self.assertListEquals(rset.rows,
- [[u'description', 11], ['in_basket', 11],
- [u'name', 13], [u'created_by', 33],
- [u'creation_date', 33], [u'is', 33], [u'is_instance_of', 33],
- [u'modification_date', 33], [u'owned_by', 33]])
+ [[u'description', 11],
+ [u'name', 13], [u'created_by', 34],
+ [u'creation_date', 34], [u'cwuri', 34],
+ ['in_basket', 34],
+ [u'is', 34], [u'is_instance_of', 34],
+ [u'modification_date', 34], [u'owned_by', 34]])
def test_select_aggregat_having_dumb(self):
# dumb but should not raise an error
@@ -553,10 +557,10 @@
def test_select_limit_offset(self):
rset = self.execute('CWGroup X ORDERBY N LIMIT 2 WHERE X name N')
- self.assertEquals(tuplify(rset.rows), [(3,), (1,)])
+ self.assertEquals(tuplify(rset.rows), [(1,), (2,)])
self.assertEquals(rset.description, [('CWGroup',), ('CWGroup',)])
rset = self.execute('CWGroup X ORDERBY N LIMIT 2 OFFSET 2 WHERE X name N')
- self.assertEquals(tuplify(rset.rows), [(4,), (2,)])
+ self.assertEquals(tuplify(rset.rows), [(3,), (4,)])
def test_select_symetric(self):
self.execute("INSERT Personne X: X nom 'machin'")
--- a/server/test/unittest_repository.py Fri Jul 31 23:26:52 2009 +0200
+++ b/server/test/unittest_repository.py Sat Aug 01 00:01:12 2009 +0200
@@ -226,12 +226,12 @@
# check order of attributes is respected
self.assertListEquals([r.type for r in schema.eschema('CWAttribute').ordered_relations()
if not r.type in ('eid', 'is', 'is_instance_of', 'identity',
- 'creation_date', 'modification_date',
+ 'creation_date', 'modification_date', 'cwuri',
'owned_by', 'created_by')],
- ['relation_type', 'from_entity', 'to_entity', 'constrained_by',
+ ['relation_type', 'from_entity', 'in_basket', 'to_entity', 'constrained_by',
'cardinality', 'ordernum',
'indexed', 'fulltextindexed', 'internationalizable',
- 'defaultval', 'description_format', 'description'])
+ 'defaultval', 'description', 'description_format'])
self.assertEquals(schema.eschema('CWEType').main_attribute(), 'name')
self.assertEquals(schema.eschema('State').main_attribute(), 'name')
@@ -324,6 +324,10 @@
self.assertRaises(BadConnectionId, repo.set_shared_data, cnxid, 'data', 0)
self.assertRaises(BadConnectionId, repo.get_shared_data, cnxid, 'data')
+ def test_schema_is_relation(self):
+ no_is_rset = self.execute('Any X WHERE NOT X is ET')
+ self.failIf(no_is_rset, no_is_rset.description)
+
class DataHelpersTC(RepositoryBasedTC):
--- a/server/test/unittest_rqlrewrite.py Fri Jul 31 23:26:52 2009 +0200
+++ b/server/test/unittest_rqlrewrite.py Sat Aug 01 00:01:12 2009 +0200
@@ -107,7 +107,7 @@
"Any S WHERE S owned_by C, C eid %(u)s, A eid %(B)s, "
"EXISTS((C identity A) OR (C in_state D, E identity A, "
"E in_state D, D name 'subscribed'), D is State, E is CWUser), "
- "S is IN(Affaire, Basket, Bookmark, CWAttribute, CWCache, CWConstraint, CWConstraintType, CWEType, CWGroup, CWPermission, CWProperty, CWRType, CWRelation, CWUser, Card, Comment, Division, Email, EmailAddress, EmailPart, EmailThread, File, Folder, Image, Note, Personne, RQLExpression, Societe, State, SubDivision, Tag, TrInfo, Transition)")
+ "S is IN(Affaire, Basket, Bookmark, CWAttribute, CWCache, CWConstraint, CWConstraintType, CWEType, CWGroup, CWPermission, CWProperty, CWRType, CWRelation, CWUser, Card, Comment, Division, Email, EmailAddress, EmailPart, EmailThread, ExternalUri, File, Folder, Image, Note, Personne, RQLExpression, Societe, State, SubDivision, Tag, TrInfo, Transition)")
def test_simplified_rqlst(self):
card_constraint = ('X in_state S, U in_group G, P require_state S,'
--- a/server/test/unittest_security.py Fri Jul 31 23:26:52 2009 +0200
+++ b/server/test/unittest_security.py Sat Aug 01 00:01:12 2009 +0200
@@ -515,6 +515,7 @@
self.execute('SET TI comment %(c)s WHERE TI wf_info_for X, X ref "ARCT01"',
{'c': u'creation'})
self.commit()
+ aff.clear_related_cache('wf_info_for', 'object')
self.assertEquals(aff.latest_trinfo().comment, 'creation')
# but not from_state/to_state
self.execute('SET X in_state S WHERE X ref "ARCT01", S name "ben non"')
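The added clear_related_cache() call works around entity-side caching: related result sets are memoised per (rtype, role), so a relation modified behind the entity's back through a raw execute() only becomes visible after the corresponding cache entry is dropped. A minimal sketch reusing the names from the test above (the rset.get_entity() lookup is an assumption for self-containedness):

aff = self.execute('Any X WHERE X ref "ARCT01"').get_entity(0, 0)
self.execute('SET TI comment %(c)s WHERE TI wf_info_for X, X ref "ARCT01"',
             {'c': u'creation'})
self.commit()
aff.clear_related_cache('wf_info_for', 'object')   # drop the stale cached rset
assert aff.latest_trinfo().comment == u'creation'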
--- a/vregistry.py Fri Jul 31 23:26:52 2009 +0200
+++ b/vregistry.py Sat Aug 01 00:01:12 2009 +0200
@@ -266,7 +266,7 @@
oid = obj.id
except AttributeError:
continue
- if oid:
+ if oid and not '__abstract__' in obj.__dict__:
self.register(obj)
def register(self, obj, registryname=None, oid=None, clear=False):
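The registration guard checks for '__abstract__' in the class's own __dict__ rather than using attribute lookup, so the flag is deliberately not inherited: an abstract base class is skipped, but its concrete subclasses are still registered unless they opt out again themselves. A small illustration (class names hypothetical):

class BaseView(object):
    __abstract__ = True       # never registered itself
    id = 'base'

class PrimaryView(BaseView):  # registered: no '__abstract__' in its own __dict__
    id = 'primary'

assert '__abstract__' in BaseView.__dict__
assert '__abstract__' not in PrimaryView.__dict__   # inherited, but not own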