# HG changeset patch
# User Aurelien Campeas
# Date 1251222139 -7200
# Node ID 4ef565c1d18338eaafc9256117712d69aa7ea003
# Parent  bc0a270622c2fe7b23755146f36f1d7ed7253f7b
# Parent  88c578819ac1bc6289bbb1403ebb108ca603d0a6
merge

diff -r 88c578819ac1 -r 4ef565c1d183 schema.py
--- a/schema.py Tue Aug 25 19:40:20 2009 +0200
+++ b/schema.py Tue Aug 25 19:42:19 2009 +0200
@@ -447,6 +447,7 @@
     reading_from_database = False
     entity_class = CubicWebEntitySchema
     relation_class = CubicWebRelationSchema
+    no_specialization_inference = ('identity',)

     def __init__(self, *args, **kwargs):
         self._eid_index = {}
diff -r 88c578819ac1 -r 4ef565c1d183 server/migractions.py
--- a/server/migractions.py Tue Aug 25 19:40:20 2009 +0200
+++ b/server/migractions.py Tue Aug 25 19:42:19 2009 +0200
@@ -61,6 +61,7 @@
             assert repo
             self._cnx = cnx
             self.repo = repo
+            self.session.data['rebuild-infered'] = False
         elif connect:
             self.repo_connect()
         if not schema:
@@ -133,33 +134,36 @@
             os.chmod(backupfile, 0600)
         # backup
         tmpdir = tempfile.mkdtemp(dir=instbkdir)
-        for source in repo.sources:
-            try:
-                source.backup(osp.join(tmpdir,source.uri))
-            except Exception, exc:
-                print '-> error trying to backup [%s]' % exc
-                if not self.confirm('Continue anyway?', default='n'):
-                    raise SystemExit(1)
-        bkup = tarfile.open(backupfile, 'w|gz')
-        for filename in os.listdir(tmpdir):
-            bkup.add(osp.join(tmpdir,filename), filename)
-        bkup.close()
-        shutil.rmtree(tmpdir)
-        # call hooks
-        repo.hm.call_hooks('server_backup', repo=repo, timestamp=timestamp)
-        # done
-        print '-> backup file', backupfile
+        try:
+            for source in repo.sources:
+                try:
+                    source.backup(osp.join(tmpdir, source.uri))
+                except Exception, exc:
+                    print '-> error trying to backup [%s]' % exc
+                    if not self.confirm('Continue anyway?', default='n'):
+                        raise SystemExit(1)
+                    else:
+                        break
+            else:
+                bkup = tarfile.open(backupfile, 'w|gz')
+                for filename in os.listdir(tmpdir):
+                    bkup.add(osp.join(tmpdir,filename), filename)
+                bkup.close()
+                # call hooks
+                repo.hm.call_hooks('server_backup', repo=repo, timestamp=timestamp)
+                # done
+                print '-> backup file', backupfile
+        finally:
+            shutil.rmtree(tmpdir)

     def restore_database(self, backupfile, drop=True, systemonly=True,
                          askconfirm=True):
-        config = self.config
-        repo = self.repo_connect()
         # check
         if not osp.exists(backupfile):
             raise Exception("Backup file %s doesn't exist" % backupfile)
             return
         if askconfirm and not self.confirm('Restore %s database from %s ?'
-                                           % (config.appid, backupfile)):
+                                           % (self.config.appid, backupfile)):
             return
         # unpack backup
         bkup = tarfile.open(backupfile, 'r|gz')
@@ -170,6 +174,9 @@
         bkup = tarfile.open(backupfile, 'r|gz')
         tmpdir = tempfile.mkdtemp()
         bkup.extractall(path=tmpdir)
+
+        self.config.open_connections_pools = False
+        repo = self.repo_connect()
         for source in repo.sources:
             if systemonly and source.uri != 'system':
                 continue
@@ -182,6 +189,7 @@
         bkup.close()
         shutil.rmtree(tmpdir)
         # call hooks
+        repo.open_connections_pools()
         repo.hm.call_hooks('server_restore', repo=repo, timestamp=backupfile)
         print '-> database restored.'

@@ -215,6 +223,7 @@
                 print 'aborting...'
                sys.exit(0)
         self.session.keep_pool_mode('transaction')
+        self.session.data['rebuild-infered'] = False
         return self._cnx

     @property
@@ -408,12 +417,12 @@
                             {'x': str(repoeschema), 'y': str(espschema)})
         self.rqlexecall(ss.updateeschema2rql(eschema),
                         ask_confirm=self.verbosity >= 2)
-        for rschema, targettypes, x in eschema.relation_definitions(True):
-            if x == 'subject':
+        for rschema, targettypes, role in eschema.relation_definitions(True):
+            if role == 'subject':
                 if not rschema in repoeschema.subject_relations():
                     continue
                 subjtypes, objtypes = [etype], targettypes
-            else: # x == 'object'
+            else: # role == 'object'
                 if not rschema in repoeschema.object_relations():
                     continue
                 subjtypes, objtypes = targettypes, [etype]
@@ -619,11 +628,13 @@
         in auto mode, automatically register entity's relation where the
         targeted type is known
         """
-        applschema = self.repo.schema
-        if etype in applschema:
-            eschema = applschema[etype]
+        instschema = self.repo.schema
+        if etype in instschema:
+            # XXX (syt) plz explain: if we're adding an entity type, it should
+            # not be there...
+            eschema = instschema[etype]
             if eschema.is_final():
-                applschema.del_entity_type(etype)
+                instschema.del_entity_type(etype)
         else:
             eschema = self.fs_schema.eschema(etype)
         confirm = self.verbosity >= 2
@@ -639,13 +650,46 @@
             # ignore those meta relations, they will be automatically added
             if rschema.type in META_RTYPES:
                 continue
-            if not rschema.type in applschema:
+            if not rschema.type in instschema:
                 # need to add the relation type and to commit to get it
                 # actually in the schema
                 self.cmd_add_relation_type(rschema.type, False, commit=True)
             # register relation definition
             self.rqlexecall(ss.rdef2rql(rschema, etype, attrschema.type),
                             ask_confirm=confirm)
+        # take care to newly introduced base class
+        # XXX some part of this should probably be under the "if auto" block
+        for spschema in eschema.specialized_by(recursive=False):
+            try:
+                instspschema = instschema[spschema]
+            except KeyError:
+                # specialized entity type not in schema, ignore
+                continue
+            if instspschema.specializes() != eschema:
+                self.rqlexec('SET D specializes P WHERE D eid %(d)s, P name %(pn)s',
+                             {'d': instspschema.eid,
+                              'pn': eschema.type}, ask_confirm=confirm)
+            for rschema, tschemas, role in spschema.relation_definitions(True):
+                for tschema in tschemas:
+                    if not tschema in instschema:
+                        continue
+                    if role == 'subject':
+                        subjschema = spschema
+                        objschema = tschema
+                        if rschema.final and instspschema.has_subject_relation(rschema):
+                            # attribute already set, has_rdef would check if
+                            # it's of the same type, we don't want this so
+                            # simply skip here
+                            continue
+                    elif role == 'object':
+                        subjschema = tschema
+                        objschema = spschema
+                    if (rschema.rproperty(subjschema, objschema, 'infered')
+                        or (instschema.has_relation(rschema) and
+                            instschema[rschema].has_rdef(subjschema, objschema))):
+                        continue
+                    self.cmd_add_relation_definition(
+                        subjschema.type, rschema.type, objschema.type)
         if auto:
             # we have commit here to get relation types actually in the schema
             self.commit()
@@ -655,12 +699,12 @@
                 # 'owned_by'/'created_by' will be automatically added
                 if rschema.final or rschema.type in META_RTYPES:
                     continue
-                rtypeadded = rschema.type in applschema
+                rtypeadded = rschema.type in instschema
                 for targetschema in rschema.objects(etype):
                     # ignore relations where the targeted type is not in the
                     # current instance schema
                     targettype = targetschema.type
-                    if not targettype in applschema and targettype != etype:
+                    if not targettype in instschema and targettype != etype:
                         continue
                     if not rtypeadded:
                        # need to add the relation type and to commit to get it
@@ -675,14 +719,14 @@
                     self.rqlexecall(ss.rdef2rql(rschema, etype, targettype),
                                     ask_confirm=confirm)
             for rschema in eschema.object_relations():
-                rtypeadded = rschema.type in applschema or rschema.type in added
+                rtypeadded = rschema.type in instschema or rschema.type in added
                 for targetschema in rschema.subjects(etype):
                     # ignore relations where the targeted type is not in the
                     # current instance schema
                     targettype = targetschema.type
                     # don't check targettype != etype since in this case the
                     # relation has already been added as a subject relation
-                    if not targettype in applschema:
+                    if not targettype in instschema:
                         continue
                     if not rtypeadded:
                         # need to add the relation type and to commit to get it
diff -r 88c578819ac1 -r 4ef565c1d183 server/repository.py
--- a/server/repository.py Tue Aug 25 19:40:20 2009 +0200
+++ b/server/repository.py Tue Aug 25 19:42:19 2009 +0200
@@ -176,6 +176,11 @@
         # create the hooks manager
         self.hm = HooksManager(self.schema)
         # open some connections pools
+        if config.open_connections_pools:
+            self.open_connections_pools()
+
+    def open_connections_pools(self):
+        config = self.config
         self._available_pools = Queue.Queue()
         self._available_pools.put_nowait(ConnectionsPool(self.sources))
         if config.read_instance_schema:
@@ -185,7 +190,7 @@
             # usually during repository creation
             self.warning("set fs instance'schema as bootstrap schema")
             config.bootstrap_cubes()
-            self.set_bootstrap_schema(self.config.load_schema())
+            self.set_bootstrap_schema(config.load_schema())
             # need to load the Any and CWUser entity types
             self.vreg.schema = self.schema
             etdirectory = join(CW_SOFTWARE_ROOT, 'entities')
@@ -198,7 +203,7 @@
             # test start: use the file system schema (quicker)
             self.warning("set fs instance'schema")
             config.bootstrap_cubes()
-            self.set_schema(self.config.load_schema())
+            self.set_schema(config.load_schema())
         if not config.creating:
             if 'CWProperty' in self.schema:
                 self.vreg.init_properties(self.properties())
@@ -226,8 +231,7 @@
             # call instance level initialisation hooks
             self.hm.call_hooks('server_startup', repo=self)
             # register a task to cleanup expired session
-            self.looping_task(self.config['session-time']/3.,
-                              self.clean_sessions)
+            self.looping_task(config['session-time']/3., self.clean_sessions)
         CW_EVENT_MANAGER.bind('after-registry-reload', self.reset_hooks)

     # internals ###############################################################
@@ -236,8 +240,9 @@
         source_config['uri'] = uri
         return get_source(source_config, self.schema, self)

-    def set_schema(self, schema, resetvreg=True):
-        schema.rebuild_infered_relations()
+    def set_schema(self, schema, resetvreg=True, rebuildinfered=True):
+        if rebuildinfered:
+            schema.rebuild_infered_relations()
         self.info('set schema %s %#x', schema.name, id(schema))
         self.debug(', '.join(sorted(str(e) for e in schema.entities())))
         self.querier.set_schema(schema)
diff -r 88c578819ac1 -r 4ef565c1d183 server/schemahooks.py
--- a/server/schemahooks.py Tue Aug 25 19:40:20 2009 +0200
+++ b/server/schemahooks.py Tue Aug 25 19:42:19 2009 +0200
@@ -135,7 +135,8 @@
         SingleLastOperation.__init__(self, session)

     def commit_event(self):
-        self.repo.set_schema(self.repo.schema)
+        rebuildinfered = self.session.data.get('rebuild-infered', True)
+        self.repo.set_schema(self.repo.schema, rebuildinfered=rebuildinfered)


 class MemSchemaOperation(Operation):
@@ -718,6 +719,28 @@
         erschema.set_rqlexprs(self.perm, rqlexprs)


+class MemSchemaSpecializesAdd(MemSchemaOperation):
+
+    def commit_event(self):
+        eschema = self.session.schema.schema_by_eid(self.etypeeid)
+        parenteschema = self.session.schema.schema_by_eid(self.parentetypeeid)
+        eschema._specialized_type = parenteschema.type
+        parenteschema._specialized_by.append(eschema.type)
+
+
+class MemSchemaSpecializesDel(MemSchemaOperation):
+
+    def commit_event(self):
+        try:
+            eschema = self.session.schema.schema_by_eid(self.etypeeid)
+            parenteschema = self.session.schema.schema_by_eid(self.parentetypeeid)
+        except KeyError:
+            # etype removed, nothing to do
+            return
+        eschema._specialized_type = None
+        parenteschema._specialized_by.remove(eschema.type)
+
+
 # deletion hooks ###############################################################

 def before_del_eetype(session, eid):
@@ -1015,11 +1038,11 @@
     MemSchemaPermissionRQLExpressionDel(session, perm, subject, expr)


-def rebuild_infered_relations(session, subject, rtype, object):
-    # registering a schema operation will trigger a call to
-    # repo.set_schema() on commit which will in turn rebuild
-    # infered relation definitions
-    MemSchemaNotifyChanges(session)
+def after_add_specializes(session, subject, rtype, object):
+    MemSchemaSpecializesAdd(session, etypeeid=subject, parentetypeeid=object)
+
+def after_del_specializes(session, subject, rtype, object):
+    MemSchemaSpecializesDel(session, etypeeid=subject, parentetypeeid=object)


 def _register_schema_hooks(hm):
@@ -1043,8 +1066,8 @@
     hm.register_hook(after_del_eetype, 'after_delete_entity', 'CWEType')
     hm.register_hook(before_del_ertype, 'before_delete_entity', 'CWRType')
     hm.register_hook(after_del_relation_type, 'after_delete_relation', 'relation_type')
-    hm.register_hook(rebuild_infered_relations, 'after_add_relation', 'specializes')
-    hm.register_hook(rebuild_infered_relations, 'after_delete_relation', 'specializes')
+    hm.register_hook(after_add_specializes, 'after_add_relation', 'specializes')
+    hm.register_hook(after_del_specializes, 'after_delete_relation', 'specializes')
     # constraints synchronization hooks
     hm.register_hook(after_add_econstraint, 'after_add_entity', 'CWConstraint')
     hm.register_hook(after_update_econstraint, 'after_update_entity', 'CWConstraint')
diff -r 88c578819ac1 -r 4ef565c1d183 server/serverconfig.py
--- a/server/serverconfig.py Tue Aug 25 19:40:20 2009 +0200
+++ b/server/serverconfig.py Tue Aug 25 19:42:19 2009 +0200
@@ -178,6 +178,10 @@
           }),
         ) + CubicWebConfiguration.options)

+    # should we open connections pools (eg connect to sources). This is usually
+    # necessary...
+    open_connections_pools = True
+
     # read the schema from the database
     read_instance_schema = True
     bootstrap_schema = True
diff -r 88c578819ac1 -r 4ef565c1d183 server/serverctl.py
--- a/server/serverctl.py Tue Aug 25 19:40:20 2009 +0200
+++ b/server/serverctl.py Tue Aug 25 19:42:19 2009 +0200
@@ -257,12 +257,19 @@
           'help': 'verbose mode: will ask all possible configuration questions',
          }
         ),
+        ('automatic',
+         {'short': 'a', 'type' : 'yn', 'metavar': '',
+          'default': 'n',
+          'help': 'automatic mode: never ask and use default answer to every question',
+          }
+         ),
        )

    def run(self, args):
        """run the command with its specific arguments"""
        from logilab.common.adbh import get_adv_func_helper
        from indexer import get_indexer
        verbose = self.get('verbose')
+        automatic = self.get('automatic')
        appid = pop_arg(args, msg='No instance specified !')
        config = ServerConfiguration.config_for(appid)
        create_db = self.config.create_db
@@ -277,13 +284,13 @@
        try:
            if helper.users_support:
                user = source['db-user']
-                if not helper.user_exists(cursor, user) and \
-                       ASK.confirm('Create db user %s ?' % user, default_is_yes=False):
+                if not helper.user_exists(cursor, user) and (automatic or \
+                   ASK.confirm('Create db user %s ?' % user, default_is_yes=False)):
                    helper.create_user(source['db-user'], source['db-password'])
                    print '-> user %s created.' % user
            dbname = source['db-name']
            if dbname in helper.list_databases(cursor):
-                if ASK.confirm('Database %s already exists -- do you want to drop it ?' % dbname):
+                if automatic or ASK.confirm('Database %s already exists -- do you want to drop it ?' % dbname):
                    cursor.execute('DROP DATABASE %s' % dbname)
                else:
                    return
@@ -311,7 +318,7 @@
        cnx.commit()
        print '-> database for instance %s created and necessary extensions installed.' % appid
        print
-        if ASK.confirm('Run db-init to initialize the system database ?'):
+        if automatic or ASK.confirm('Run db-init to initialize the system database ?'):
            cmd_run('db-init', config.appid)
        else:
            print ('-> nevermind, you can do it later with '
diff -r 88c578819ac1 -r 4ef565c1d183 server/sources/native.py
--- a/server/sources/native.py Tue Aug 25 19:40:20 2009 +0200
+++ b/server/sources/native.py Tue Aug 25 19:42:19 2009 +0200
@@ -216,11 +216,13 @@

     def restore(self, backupfile, drop):
         """method called to restore a backup of source's data"""
-        self.close_pool_connections()
+        if self.repo.config.open_connections_pools:
+            self.close_pool_connections()
         try:
             self.restore_from_file(backupfile, drop)
         finally:
-            self.open_pool_connections()
+            if self.repo.config.open_connections_pools:
+                self.open_pool_connections()

     def init(self):
         self.init_creating()
diff -r 88c578819ac1 -r 4ef565c1d183 server/test/data/migratedapp/schema.py
--- a/server/test/data/migratedapp/schema.py Tue Aug 25 19:40:20 2009 +0200
+++ b/server/test/data/migratedapp/schema.py Tue Aug 25 19:42:19 2009 +0200
@@ -33,7 +33,15 @@
                    'delete': ('managers', RRQLExpression('O owned_by U')),
                    }

-class Note(EntityType):
+class Para(EntityType):
+    para = String(maxsize=512)
+    newattr = String()
+    newinlined = SubjectRelation('Affaire', cardinality='?*', inlined=True)
+    newnotinlined = SubjectRelation('Affaire', cardinality='?*')
+
+class Note(Para):
+    __specializes_schema__ = True
+
     permissions = {'read': ('managers', 'users', 'guests',),
                    'update': ('managers', 'owners',),
                    'delete': ('managers', ),
@@ -46,11 +54,14 @@
     type = String(maxsize=1)
     whatever = Int()
     mydate = Date(default='TODAY')
-    para = String(maxsize=512)
     shortpara = String(maxsize=64)
     ecrit_par = SubjectRelation('Personne', constraints=[RQLConstraint('S concerne A, O concerne A')])
     attachment = SubjectRelation(('File', 'Image'))

+class Text(Para):
+    __specializes_schema__ = True
+    summary = String(maxsize=512)
+
 class ecrit_par(RelationType):
     permissions = {'read': ('managers', 'users', 'guests',),
                    'delete': ('managers', ),
diff -r 88c578819ac1 -r 4ef565c1d183 server/test/unittest_migractions.py
--- a/server/test/unittest_migractions.py Tue Aug 25 19:40:20 2009 +0200
+++ b/server/test/unittest_migractions.py Tue Aug 25 19:42:19 2009 +0200
@@ -457,5 +457,47 @@
         user.clear_related_cache('in_state', 'subject')
         self.assertEquals(user.state, 'deactivated')

+    def test_introduce_base_class(self):
+        self.mh.cmd_add_entity_type('Para')
+        self.mh.repo.schema.rebuild_infered_relations()
+        self.assertEquals(sorted(et.type for et in self.schema['Para'].specialized_by()),
+                          ['Note'])
+        self.assertEquals(self.schema['Note'].specializes().type, 'Para')
+        self.mh.cmd_add_entity_type('Text')
+        self.mh.repo.schema.rebuild_infered_relations()
+        self.assertEquals(sorted(et.type for et in self.schema['Para'].specialized_by()),
+                          ['Note', 'Text'])
+        self.assertEquals(self.schema['Text'].specializes().type, 'Para')
+        # test columns have been actually added
+        text = self.execute('INSERT Text X: X para "hip", X summary "hop", X newattr "momo"').get_entity(0, 0)
+        note = self.execute('INSERT Note X: X para "hip", X shortpara "hop", X newattr "momo"').get_entity(0, 0)
+        aff = self.execute('INSERT Affaire X').get_entity(0, 0)
+        self.failUnless(self.execute('SET X newnotinlined Y WHERE X eid %(x)s, Y eid %(y)s',
+                                     {'x': text.eid, 'y': aff.eid}, 'x'))
+        self.failUnless(self.execute('SET X newnotinlined Y WHERE X eid %(x)s, Y eid %(y)s',
+                                     {'x': note.eid, 'y': aff.eid}, 'x'))
+        self.failUnless(self.execute('SET X newinlined Y WHERE X eid %(x)s, Y eid %(y)s',
+                                     {'x': text.eid, 'y': aff.eid}, 'x'))
+        self.failUnless(self.execute('SET X newinlined Y WHERE X eid %(x)s, Y eid %(y)s',
+                                     {'x': note.eid, 'y': aff.eid}, 'x'))
+        # XXX remove specializes by ourselves, else tearDown fails when removing
+        # Para because of Note inheritance. This could be fixed by putting the
+        # MemSchemaCWETypeDel(session, name) operation in the
+        # after_delete_entity(CWEType) hook, since in that case the MemSchemaSpecializesDel
+        # operation would be removed before, but I'm not sure this is a desired behaviour.
+        #
+        # also we need more tests about introducing/removing base classes or
+        # specialization relationship...
+        self.session.data['rebuild-infered'] = True
+        try:
+            self.execute('DELETE X specializes Y WHERE Y name "Para"')
+            self.commit()
+        finally:
+            self.session.data['rebuild-infered'] = False
+        self.assertEquals(sorted(et.type for et in self.schema['Para'].specialized_by()),
+                          [])
+        self.assertEquals(self.schema['Note'].specializes(), None)
+        self.assertEquals(self.schema['Text'].specializes(), None)
+
 if __name__ == '__main__':
     unittest_main()
diff -r 88c578819ac1 -r 4ef565c1d183 web/views/cwuser.py
--- a/web/views/cwuser.py Tue Aug 25 19:40:20 2009 +0200
+++ b/web/views/cwuser.py Tue Aug 25 19:42:19 2009 +0200
@@ -11,9 +11,10 @@
 from cubicweb.selectors import one_line_rset, implements, match_user_groups
 from cubicweb.view import EntityView
-from cubicweb.web import action
+from cubicweb.web import action, uicfg
 from cubicweb.web.views import primary

+uicfg.primaryview_section.tag_attribute(('CWUser', 'login'), 'hidden')

 class UserPreferencesEntityAction(action.Action):
     id = 'prefs'