# HG changeset patch
# User Sylvain Thénault
# Date 1277996797 -7200
# Node ID 9db65b381028a2da447e41852fa7229a1c734b8c
# Parent  60880c81e32e32e9fba2b80a2fe2343fd9e1a990
# Parent  b5640328ffad0e233b5991b81c583d6c179d85d8
backport stable

diff -r 60880c81e32e -r 9db65b381028 __pkginfo__.py
diff -r 60880c81e32e -r 9db65b381028 debian/control
diff -r 60880c81e32e -r 9db65b381028 hooks/security.py
--- a/hooks/security.py	Wed Jun 30 15:43:36 2010 +0200
+++ b/hooks/security.py	Thu Jul 01 17:06:37 2010 +0200
@@ -69,10 +69,12 @@
 
 class _CheckRelationPermissionOp(hook.LateOperation):
     def precommit_event(self):
-        rdef = self.rschema.rdef(self.session.describe(self.eidfrom)[0],
-                                 self.session.describe(self.eidto)[0])
-        rdef.check_perm(self.session, self.action,
-                        fromeid=self.eidfrom, toeid=self.eidto)
+        session = self.session
+        for args in session.transaction_data.pop('check_relation_perm_op'):
+            action, rschema, eidfrom, eidto = args
+            rdef = rschema.rdef(session.describe(eidfrom)[0],
+                                session.describe(eidto)[0])
+            rdef.check_perm(session, action, fromeid=eidfrom, toeid=eidto)
 
     def commit_event(self):
         pass
@@ -154,10 +156,9 @@
             return
         rschema = self._cw.repo.schema[self.rtype]
         if self.rtype in ON_COMMIT_ADD_RELATIONS:
-            _CheckRelationPermissionOp(self._cw, action='add',
-                                       rschema=rschema,
-                                       eidfrom=self.eidfrom,
-                                       eidto=self.eidto)
+            hook.set_operation(self._cw, 'check_relation_perm_op',
+                               ('add', rschema, self.eidfrom, self.eidto),
+                               _CheckRelationPermissionOp)
         else:
             rdef = rschema.rdef(self._cw.describe(self.eidfrom)[0],
                                 self._cw.describe(self.eidto)[0])
diff -r 60880c81e32e -r 9db65b381028 hooks/syncschema.py
--- a/hooks/syncschema.py	Wed Jun 30 15:43:36 2010 +0200
+++ b/hooks/syncschema.py	Thu Jul 01 17:06:37 2010 +0200
@@ -252,12 +252,11 @@
             return
         session = self.session
         if 'fulltext_container' in self.values:
-            ftiupdates = session.transaction_data.setdefault(
-                'fti_update_etypes', set())
             for subjtype, objtype in rschema.rdefs:
-                ftiupdates.add(subjtype)
-                ftiupdates.add(objtype)
-            UpdateFTIndexOp(session)
+                hook.set_operation(session, 'fti_update_etypes', subjtype,
+                                   UpdateFTIndexOp)
+                hook.set_operation(session, 'fti_update_etypes', objtype,
+                                   UpdateFTIndexOp)
         if not 'inlined' in self.values:
             return # nothing to do
         inlined = self.values['inlined']
@@ -367,7 +366,7 @@
         sysource = session.pool.source('system')
         attrtype = y2sql.type_from_constraints(
             sysource.dbhelper, rdef.object, rdef.constraints)
-        # XXX should be moved somehow into lgc.adbh: sqlite doesn't support to
+        # XXX should be moved somehow into lgdb: sqlite doesn't support to
         # add a new column with UNIQUE, it should be added after the ALTER TABLE
         # using ADD INDEX
         if sysource.dbdriver == 'sqlite' and 'UNIQUE' in attrtype:
@@ -506,23 +505,21 @@
         else:
             sysource.drop_index(session, table, column)
         if 'cardinality' in self.values and self.rschema.final:
-            adbh = session.pool.source('system').dbhelper
-            if not adbh.alter_column_support:
+            syssource = session.pool.source('system')
+            if not syssource.dbhelper.alter_column_support:
                 # not supported (and NOT NULL not set by yams in that case, so
-                # no worry)
+                # no worry) XXX (syt) then should we set NOT NULL below ??
                 return
             atype = self.rschema.objects(etype)[0]
             constraints = self.rschema.rdef(etype, atype).constraints
-            coltype = y2sql.type_from_constraints(adbh, atype, constraints,
+            coltype = y2sql.type_from_constraints(syssource.dbhelper, atype, constraints,
                                                   creating=False)
             # XXX check self.values['cardinality'][0] actually changed?
-            notnull = self.values['cardinality'][0] != '1'
-            sql = adbh.sql_set_null_allowed(table, column, coltype, notnull)
-            session.system_sql(sql)
+            syssource.set_null_allowed(self.session, table, column, coltype,
+                                       self.values['cardinality'][0] != '1')
         if 'fulltextindexed' in self.values:
-            UpdateFTIndexOp(session)
-            session.transaction_data.setdefault(
-                'fti_update_etypes', set()).add(etype)
+            hook.set_operation(session, 'fti_update_etypes', etype,
+                               UpdateFTIndexOp)
 
 
 class SourceDbCWConstraintAdd(hook.Operation):
@@ -548,13 +545,12 @@
         # alter the physical schema on size constraint changes
         if newcstr.type() == 'SizeConstraint' and (
             oldcstr is None or oldcstr.max != newcstr.max):
-            adbh = self.session.pool.source('system').dbhelper
+            syssource = self.session.pool.source('system')
             card = rtype.rdef(subjtype, objtype).cardinality
-            coltype = y2sql.type_from_constraints(adbh, objtype, [newcstr],
-                                                  creating=False)
-            sql = adbh.sql_change_col_type(table, column, coltype, card != '1')
+            coltype = y2sql.type_from_constraints(syssource.dbhelper, objtype,
+                                                  [newcstr], creating=False)
             try:
-                session.system_sql(sql, rollback_on_failure=False)
+                syssource.change_col_type(session, table, column, coltype, card[0] != '1')
                 self.info('altered column %s of table %s: now %s',
                           column, table, coltype)
             except Exception, ex:
@@ -575,13 +571,13 @@
         column = SQL_PREFIX + str(self.rdef.rtype)
         # alter the physical schema on size/unique constraint changes
         if cstrtype == 'SizeConstraint':
+            syssource = self.session.pool.source('system')
+            coltype = y2sql.type_from_constraints(syssource.dbhelper,
+                                                  self.rdef.object, [],
+                                                  creating=False)
             try:
-                adbh = self.session.pool.source('system').dbhelper
-                coltype = y2sql.type_from_constraints(adbh, rdef.object, [],
-                                                      creating=False)
-                sql = adbh.sql_change_col_type(table, column, coltype,
-                                               rdef.cardinality != '1')
-                self.session.system_sql(sql, rollback_on_failure=False)
+                syssource.change_col_type(session, table, column, coltype,
+                                          self.rdef.cardinality[0] != '1')
                 self.info('altered column %s of table %s: now %s',
                           column, table, coltype)
             except Exception, ex:
@@ -1174,7 +1170,7 @@
     def postcommit_event(self):
         session = self.session
         source = session.repo.system_source
-        to_reindex = session.transaction_data.get('fti_update_etypes', ())
+        to_reindex = session.transaction_data.pop('fti_update_etypes', ())
         self.info('%i etypes need full text indexed reindexation',
                   len(to_reindex))
         schema = self.session.repo.vreg.schema
diff -r 60880c81e32e -r 9db65b381028 misc/migration/3.8.5_Any.py
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/misc/migration/3.8.5_Any.py	Thu Jul 01 17:06:37 2010 +0200
@@ -0,0 +1,59 @@
+def migrate_varchar_to_nvarchar():
+    dbdriver = config.sources()['system']['db-driver']
+    if dbdriver != "sqlserver2005":
+        return
+
+    introspection_sql = """\
+SELECT table_schema, table_name, column_name, is_nullable, character_maximum_length
+FROM information_schema.columns
+WHERE data_type = 'VARCHAR' and table_name <> 'SYSDIAGRAMS'
+"""
+    has_index_sql = """\
+SELECT i.name AS index_name,
+       i.type_desc,
+       i.is_unique,
+       i.is_unique_constraint
+FROM sys.indexes AS i, sys.index_columns as j, sys.columns as k
+WHERE is_hypothetical = 0 AND i.index_id <> 0
+AND i.object_id = j.object_id
+AND i.index_id = j.index_id
+AND i.object_id = OBJECT_ID('%(table)s')
+AND k.name = '%(col)s'
+AND k.object_id=i.object_id
+AND j.column_id = k.column_id;"""
+
+    generated_statements = []
+    for schema, table, column, is_nullable, length in sql(introspection_sql, ask_confirm=False):
+        qualified_table = '[%s].[%s]' % (schema, table)
+        rset = sql(has_index_sql % {'table': qualified_table, 'col': column},
+                   ask_confirm=False)
+        drops = []
+        creates = []
+        for idx_name, idx_type, idx_unique, is_unique_constraint in rset:
+            if is_unique_constraint:
+                drops.append('ALTER TABLE %s DROP CONSTRAINT %s' % (qualified_table, idx_name))
+                creates.append('ALTER TABLE %s ADD CONSTRAINT %s UNIQUE (%s)' % (qualified_table, idx_name, column))
+            else:
+                drops.append('DROP INDEX %s ON %s' % (idx_name, qualified_table))
+                if idx_unique:
+                    unique = 'UNIQUE'
+                else:
+                    unique = ''
+                creates.append('CREATE %s %s INDEX %s ON %s(%s)' % (unique, idx_type, idx_name, qualified_table, column))
+
+        if length == -1:
+            length = 'max'
+        if is_nullable == 'YES':
+            not_null = 'NULL'
+        else:
+            not_null = 'NOT NULL'
+        alter_sql = 'ALTER TABLE %s ALTER COLUMN %s NVARCHAR(%s) %s' % (qualified_table, column, length, not_null)
+        generated_statements += drops + [alter_sql] + creates
+
+    for statement in generated_statements:
+        print statement
+        sql(statement, ask_confirm=False)
+    commit()
+
+migrate_varchar_to_nvarchar()
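For illustration, assuming the introspection query finds a NOT NULL VARCHAR(256) column cw_title on a table [dbo].[cw_Document] covered by a unique non-clustered index (all of these names are invented for the example), the loop above would queue statements of roughly this shape:

    # hypothetical content of generated_statements for one such column
    ['DROP INDEX cw_document_title_idx ON [dbo].[cw_Document]',
     'ALTER TABLE [dbo].[cw_Document] ALTER COLUMN cw_title NVARCHAR(256) NOT NULL',
     'CREATE UNIQUE NONCLUSTERED INDEX cw_document_title_idx ON [dbo].[cw_Document](cw_title)']

Each statement is printed and then executed through sql(..., ask_confirm=False) before the final commit().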
diff -r 60880c81e32e -r 9db65b381028 rtags.py
--- a/rtags.py	Wed Jun 30 15:43:36 2010 +0200
+++ b/rtags.py	Thu Jul 01 17:06:37 2010 +0200
@@ -82,16 +82,14 @@
         self._tagdefs.clear()
 
     def _get_keys(self, stype, rtype, otype, tagged):
-        keys = [('*', rtype, '*', tagged),
-                ('*', rtype, otype, tagged),
-                (stype, rtype, '*', tagged),
-                (stype, rtype, otype, tagged)]
-        if stype == '*' or otype == '*':
-            keys.remove( ('*', rtype, '*', tagged) )
-        if stype == '*':
-            keys.remove( ('*', rtype, otype, tagged) )
-        if otype == '*':
-            keys.remove( (stype, rtype, '*', tagged) )
+        keys = []
+        if '*' not in (stype, otype):
+            keys.append(('*', rtype, '*', tagged))
+        if '*' != stype:
+            keys.append(('*', rtype, otype, tagged))
+        if '*' != otype:
+            keys.append((stype, rtype, '*', tagged))
+        keys.append((stype, rtype, otype, tagged))
         return keys
 
     def init(self, schema, check=True):
diff -r 60880c81e32e -r 9db65b381028 server/hook.py
--- a/server/hook.py	Wed Jun 30 15:43:36 2010 +0200
+++ b/server/hook.py	Thu Jul 01 17:06:37 2010 +0200
@@ -46,8 +46,8 @@
 `timestamp` attributes, but *their `_cw` attribute is None*.
 
 Session hooks (eg session_open, session_close) have no special attribute.
+"""
 
-"""
 from __future__ import with_statement
 
 __docformat__ = "restructuredtext en"
@@ -369,10 +369,10 @@
     commit / rollback transations. Possible events are:
 
     precommit:
-      the pool is preparing to commit. You shouldn't do anything things which
-      has to be reverted if the commit fail at this point, but you can freely
+      the pool is preparing to commit. You shouldn't do anything which
+      has to be reverted if the commit fails at this point, but you can freely
       do any heavy computation or raise an exception if the commit can't go.
-      You can add some new operation during this phase but their precommit
+      You can add some new operations during this phase but their precommit
      event won't be triggered
 
     commit:
@@ -391,6 +391,12 @@
     * a commit event failed, all operations which are not been triggered for
       commit are rollbacked
 
+    postcommit:
+      The transaction is over. All the ORM entities are
+      invalid. If you need to work on the database, you need to start
+      a new transaction, for instance using a new internal_session,
+      which you will need to commit (and close!).
+
     order of operations may be important, and is controlled according to
     the insert_index's method output
     """
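The security and schema-synchronisation hooks above are rewritten around hook.set_operation, which accumulates values in session.transaction_data and instantiates the given operation class at most once per transaction. A minimal sketch of the pattern, assuming an invented 'notify_eids' key, hook and operation; set_operation, Operation, Hook and transaction_data are the real CubicWeb names used in this patch, everything else is illustrative:

    from cubicweb.server import hook

    class _SendNotificationsOp(hook.Operation):
        """hypothetical operation: created at most once per transaction"""
        def precommit_event(self):
            # pop() the accumulated set so it cannot be processed twice
            for eid in self.session.transaction_data.pop('notify_eids'):
                self.info('would notify about entity %s', eid)

    class NotifyHook(hook.Hook):
        """hypothetical hook; a real one would also define __select__"""
        __regid__ = 'myapp.notify'
        events = ('after_add_entity',)
        def __call__(self):
            # each call only records a value; the operation is created the
            # first time the 'notify_eids' key is seen in this transaction
            hook.set_operation(self._cw, 'notify_eids', self.entity.eid,
                               _SendNotificationsOp)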
diff -r 60880c81e32e -r 9db65b381028 server/pool.py
--- a/server/pool.py	Wed Jun 30 15:43:36 2010 +0200
+++ b/server/pool.py	Thu Jul 01 17:06:37 2010 +0200
@@ -19,9 +19,8 @@
 connections pools, each of them dealing with a set of connections on each source
 used by the repository. A connections pools (`ConnectionsPool`) is an
 abstraction for a group of connection to each source.
-
+"""
 
-"""
 __docformat__ = "restructuredtext en"
 
 import sys
diff -r 60880c81e32e -r 9db65b381028 server/session.py
--- a/server/session.py	Wed Jun 30 15:43:36 2010 +0200
+++ b/server/session.py	Thu Jul 01 17:06:37 2010 +0200
@@ -563,11 +563,15 @@
     @property
     def pool(self):
         """connections pool, set according to transaction mode for each query"""
+        if self._closed:
+            self.reset_pool(True)
+            raise Exception('try to access pool on a closed session')
         return getattr(self._threaddata, 'pool', None)
 
-    def set_pool(self, checkclosed=True):
+    def set_pool(self):
         """the session need a pool to execute some queries"""
-        if checkclosed and self._closed:
+        if self._closed:
+            self.reset_pool(True)
             raise Exception('try to set pool on a closed session')
         if self.pool is None:
             # get pool first to avoid race-condition
@@ -578,24 +582,34 @@
                 self._threaddata.pool = None
                 self.repo._free_pool(pool)
                 raise
-            self._threads_in_transaction.add(threading.currentThread())
+            self._threads_in_transaction.add(
+                (threading.currentThread(), pool) )
         return self._threaddata.pool
 
+    def _free_thread_pool(self, thread, pool, force_close=False):
+        try:
+            self._threads_in_transaction.remove( (thread, pool) )
+        except KeyError:
+            # race condition on pool freeing (freed by commit or rollback vs
+            # close)
+            pass
+        else:
+            if force_close:
+                pool.reconnect()
+            else:
+                pool.pool_reset()
+            # free pool once everything is done to avoid race-condition
+            self.repo._free_pool(pool)
+
     def reset_pool(self, ignoremode=False):
         """the session is no longer using its pool, at least for some time"""
         # pool may be none if no operation has been done since last commit
         # or rollback
-        if self.pool is not None and (ignoremode or self.mode == 'read'):
+        pool = getattr(self._threaddata, 'pool', None)
+        if pool is not None and (ignoremode or self.mode == 'read'):
             # even in read mode, we must release the current transaction
-            pool = self.pool
-            try:
-                self._threads_in_transaction.remove(threading.currentThread())
-            except KeyError:
-                pass
-            pool.pool_reset()
+            self._free_thread_pool(threading.currentThread(), pool)
             del self._threaddata.pool
-            # free pool once everything is done to avoid race-condition
-            self.repo._free_pool(pool)
 
     def _touch(self):
         """update latest session usage timestamp and reset mode to read"""
@@ -772,7 +786,9 @@
 
     def rollback(self, reset_pool=True):
         """rollback the current session's transaction"""
-        if self.pool is None:
+        # don't use self.pool, rollback may be called with _closed == True
+        pool = getattr(self._threaddata, 'pool', None)
+        if pool is None:
             self._clear_thread_data()
             self._touch()
             self.debug('rollback session %s done (no db activity)', self.id)
@@ -787,7 +803,7 @@
                 except:
                     self.critical('rollback error', exc_info=sys.exc_info())
                     continue
-            self.pool.rollback()
+            pool.rollback()
             self.debug('rollback for session %s done', self.id)
         finally:
             self._touch()
@@ -799,7 +815,7 @@
         """do not close pool on session close, since they are shared now"""
         self._closed = True
         # copy since _threads_in_transaction maybe modified while waiting
-        for thread in self._threads_in_transaction.copy():
+        for thread, pool in self._threads_in_transaction.copy():
             if thread is threading.currentThread():
                 continue
             self.info('waiting for thread %s', thread)
@@ -809,11 +825,12 @@
             for i in xrange(10):
                 thread.join(1)
                 if not (thread.isAlive() and
-                        thread in self._threads_in_transaction):
+                        (thread, pool) in self._threads_in_transaction):
                     break
             else:
                 self.error('thread %s still alive after 10 seconds, will close '
                            'session anyway', thread)
+                self._free_thread_pool(thread, pool, force_close=True)
         self.rollback()
         del self.__threaddata
         del self._tx_data
diff -r 60880c81e32e -r 9db65b381028 server/sources/__init__.py
diff -r 60880c81e32e -r 9db65b381028 server/sources/extlite.py
--- a/server/sources/extlite.py	Wed Jun 30 15:43:36 2010 +0200
+++ b/server/sources/extlite.py	Thu Jul 01 17:06:37 2010 +0200
@@ -16,8 +16,8 @@
 # You should have received a copy of the GNU Lesser General Public License along
 # with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
 """provide an abstract class for external sources using a sqlite database helper
+"""
 
-"""
 __docformat__ = "restructuredtext en"
diff -r 60880c81e32e -r 9db65b381028 server/sources/ldapuser.py
--- a/server/sources/ldapuser.py	Wed Jun 30 15:43:36 2010 +0200
+++ b/server/sources/ldapuser.py	Thu Jul 01 17:06:37 2010 +0200
@@ -293,7 +293,13 @@
             raise AuthenticationError()
         # check password by establishing a (unused) connection
         try:
-            self._connect(user, password)
+            if password:
+                self._connect(user, password)
+            else:
+                # On Windows + ADAM this would have succeeded (!!!)
+                # You get Authenticated as: 'NT AUTHORITY\ANONYMOUS LOGON'.
+                # we really really don't want that
+                raise Exception('No password provided')
         except Exception, ex:
             self.info('while trying to authenticate %s: %s', user, ex)
             # Something went wrong, most likely bad credentials
@@ -553,7 +559,7 @@
                 self._cache[rec_dn] = rec_dict
                 result.append(rec_dict)
         #print '--->', result
-        self.info('ldap built results %s', len(result))
+        self.debug('ldap built results %s', len(result))
         return result
 
     def before_entity_insertion(self, session, lid, etype, eid):
diff -r 60880c81e32e -r 9db65b381028 server/sources/native.py
--- a/server/sources/native.py	Wed Jun 30 15:43:36 2010 +0200
+++ b/server/sources/native.py	Thu Jul 01 17:06:37 2010 +0200
@@ -275,6 +275,8 @@
         if self.dbdriver == 'sqlite':
             self._create_eid = None
             self.create_eid = self._create_eid_sqlite
+        self.binary_to_str = self.dbhelper.dbapi_module.binary_to_str
+
 
     @property
     def _sqlcnx(self):
@@ -672,9 +674,6 @@
 
     # short cut to method requiring advanced db helper usage ##################
 
-    def binary_to_str(self, value):
-        return self.dbhelper.dbapi_module.binary_to_str(value)
-
     def create_index(self, session, table, column, unique=False):
         cursor = LogCursor(session.pool[self.uri])
         self.dbhelper.create_index(cursor, table, column, unique)
@@ -683,6 +682,14 @@
         cursor = LogCursor(session.pool[self.uri])
         self.dbhelper.drop_index(cursor, table, column, unique)
 
+    def change_col_type(self, session, table, column, coltype, null_allowed):
+        cursor = LogCursor(session.pool[self.uri])
+        self.dbhelper.change_col_type(cursor, table, column, coltype, null_allowed)
+
+    def set_null_allowed(self, session, table, column, coltype, null_allowed):
+        cursor = LogCursor(session.pool[self.uri])
+        self.dbhelper.set_null_allowed(cursor, table, column, coltype, null_allowed)
+
     # system source interface #################################################
 
     def eid_type_source(self, session, eid):
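The two new native-source methods mirror create_index()/drop_index(): callers hand over the session and the source drives its db helper on a logged cursor, instead of building ALTER TABLE statements and calling session.system_sql() themselves (compare with the syncschema changes above). A minimal sketch of a caller, with table, column and type values invented for the example:

    # sketch only: 'cw_Person', 'cw_nom' and the column type are made up
    syssource = session.pool.source('system')
    syssource.change_col_type(session, 'cw_Person', 'cw_nom', 'nvarchar(128)', True)
    syssource.set_null_allowed(session, 'cw_Person', 'cw_nom', 'nvarchar(128)', True)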
diff -r 60880c81e32e -r 9db65b381028 server/sources/rql2sql.py
--- a/server/sources/rql2sql.py	Wed Jun 30 15:43:36 2010 +0200
+++ b/server/sources/rql2sql.py	Thu Jul 01 17:06:37 2010 +0200
@@ -833,10 +833,11 @@
             # if the rhs variable is only linked to this relation, this mean we
             # only want the relation to exists, eg NOT NULL in case of inlined
             # relation
-            if len(rhsvar.stinfo['relations']) == 1 and rhsvar._q_invariant:
+            if rhsvar._q_invariant:
+                sql = self._extra_join_sql(relation, lhssql, rhsvar)
+                if sql:
+                    return sql
                 return '%s IS NOT NULL' % lhssql
-            if rhsvar._q_invariant:
-                return self._extra_join_sql(relation, lhssql, rhsvar)
             return '%s=%s' % (lhssql, rhsvar.accept(self))
 
     def _process_relation_term(self, relation, rid, termvar, termconst, relfield):
diff -r 60880c81e32e -r 9db65b381028 server/test/unittest_rql2sql.py
--- a/server/test/unittest_rql2sql.py	Wed Jun 30 15:43:36 2010 +0200
+++ b/server/test/unittest_rql2sql.py	Thu Jul 01 17:06:37 2010 +0200
@@ -272,7 +272,7 @@
     ('Any O WHERE NOT S ecrit_par O, S eid 1, S inline1 P, O inline2 P',
      '''SELECT _O.cw_eid
 FROM cw_Note AS _S, cw_Personne AS _O
-WHERE NOT (_S.cw_ecrit_par=_O.cw_eid) AND _S.cw_eid=1 AND _O.cw_inline2=_S.cw_inline1'''),
+WHERE NOT (_S.cw_ecrit_par=_O.cw_eid) AND _S.cw_eid=1 AND _S.cw_inline1 IS NOT NULL AND _O.cw_inline2=_S.cw_inline1'''),
 
     ('DISTINCT Any S ORDERBY stockproc(SI) WHERE NOT S ecrit_par O, S para SI',
      '''SELECT T1.C0
 FROM (SELECT DISTINCT _S.cw_eid AS C0, STOCKPROC(_S.cw_para) AS C1
@@ -966,6 +966,12 @@
     ]
 
 INLINE = [
+
+    ('Any P WHERE N eid 1, N ecrit_par P, NOT P owned_by P2',
+     '''SELECT _N.cw_ecrit_par
+FROM cw_Note AS _N
+WHERE _N.cw_eid=1 AND _N.cw_ecrit_par IS NOT NULL AND NOT (EXISTS(SELECT 1 FROM owned_by_relation AS rel_owned_by0 WHERE _N.cw_ecrit_par=rel_owned_by0.eid_from))'''),
+
     ('Any P, L WHERE N ecrit_par P, P nom L, N eid 0',
      '''SELECT _P.cw_eid, _P.cw_nom
 FROM cw_Note AS _N, cw_Personne AS _P
@@ -997,9 +1003,10 @@
 WHERE NOT (_N.cw_ecrit_par=_P.cw_eid) AND _N.cw_eid=512'''),
 
     ('Any S,ES,T WHERE S state_of ET, ET name "CWUser", ES allowed_transition T, T destination_state S',
+     # XXX "_T.cw_destination_state IS NOT NULL" could be avoided here but it's not worth it
      '''SELECT _T.cw_destination_state, rel_allowed_transition1.eid_from, _T.cw_eid
 FROM allowed_transition_relation AS rel_allowed_transition1, cw_Transition AS _T, cw_Workflow AS _ET, state_of_relation AS rel_state_of0
-WHERE _T.cw_destination_state=rel_state_of0.eid_from AND rel_state_of0.eid_to=_ET.cw_eid AND _ET.cw_name=CWUser AND rel_allowed_transition1.eid_to=_T.cw_eid'''),
+WHERE _T.cw_destination_state=rel_state_of0.eid_from AND rel_state_of0.eid_to=_ET.cw_eid AND _ET.cw_name=CWUser AND rel_allowed_transition1.eid_to=_T.cw_eid AND _T.cw_destination_state IS NOT NULL'''),
 
     ('Any O WHERE S eid 0, S in_state O',
      '''SELECT _S.cw_in_state
diff -r 60880c81e32e -r 9db65b381028 web/test/unittest_uicfg.py
--- a/web/test/unittest_uicfg.py	Wed Jun 30 15:43:36 2010 +0200
+++ b/web/test/unittest_uicfg.py	Thu Jul 01 17:06:37 2010 +0200
@@ -15,6 +15,7 @@
 #
 # You should have received a copy of the GNU Lesser General Public License along
 # with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+from logilab.common.testlib import tag
 from cubicweb.devtools.testlib import CubicWebTC
 from cubicweb.web import uicfg
 
@@ -25,6 +26,57 @@
     def test_default_actionbox_appearsin_addmenu_config(self):
         self.failIf(abaa.etype_get('TrInfo', 'wf_info_for', 'object', 'CWUser'))
 
+
+class DefinitionOrderTC(CubicWebTC):
+    """This test checks that when multiple definitions could match a key, only
+    the most specific one applies"""
+
+    def setUp(self):
+
+        new_def = (
+                    (('*', 'login', '*'),
+                     {'formtype':'main', 'section':'hidden'}),
+                    (('*', 'login', '*'),
+                     {'formtype':'muledit', 'section':'hidden'}),
+                    (('CWUser', 'login', '*'),
+                     {'formtype':'main', 'section':'attributes'}),
+                    (('CWUser', 'login', '*'),
+                     {'formtype':'muledit', 'section':'attributes'}),
+                    (('CWUser', 'login', 'String'),
+                     {'formtype':'main', 'section':'inlined'}),
+                    (('CWUser', 'login', 'String'),
+                     {'formtype':'inlined', 'section':'attributes'}),
+                    )
+        self._old_def = []
+
+        for key, kwargs in new_def:
+            nkey = key[0], key[1], key[2], 'subject'
+            self._old_def.append((nkey, uicfg.autoform_section._tagdefs.get(nkey)))
+            uicfg.autoform_section.tag_subject_of(key, **kwargs)
+
+        super(DefinitionOrderTC, self).setUp()
+
+
+    @tag('uicfg')
+    def test_definition_order_hidden(self):
+        result = uicfg.autoform_section.get('CWUser', 'login', 'String', 'subject')
+        expected = set(['main_inlined', 'muledit_attributes', 'inlined_attributes'])
+        self.assertSetEquals(result, expected)
+
+    def tearDown(self):
+        super(DefinitionOrderTC, self).tearDown()
+        for key, tags in self._old_def:
+            if tags is None:
+                uicfg.autoform_section.del_rtag(*key)
+            else:
+                for tag in tags:
+                    formtype, section = tag.split('_')
+                    uicfg.autoform_section.tag_subject_of(key[:3], formtype=formtype, section=section)
+
+        uicfg.autoform_section.clear()
+        uicfg.autoform_section.init(self.repo.vreg.schema)
+
 if __name__ == '__main__':
     from logilab.common.testlib import unittest_main
     unittest_main()
diff -r 60880c81e32e -r 9db65b381028 web/uicfg.py
--- a/web/uicfg.py	Wed Jun 30 15:43:36 2010 +0200
+++ b/web/uicfg.py	Thu Jul 01 17:06:37 2010 +0200
@@ -284,8 +284,19 @@
                 rtags.add('%s_%s' % (formtype, section))
         return rtags
 
-    def init_get(self, *key):
-        return super(AutoformSectionRelationTags, self).get(*key)
+    def init_get(self, stype, rtype, otype, tagged):
+        key = (stype, rtype, otype, tagged)
+        rtags = {}
+        for key in self._get_keys(stype, rtype, otype, tagged):
+            tags = self._tagdefs.get(key, ())
+            for tag in tags:
+                assert '_' in tag, (tag, tags)
+                section, value = tag.split('_', 1)
+                rtags[section] = value
+        cls = self.tag_container_cls
+        rtags = cls('_'.join([section,value]) for section,value in rtags.iteritems())
+        return rtags
+
 
     def get(self, *key):
         # overriden to avoid recomputing done in parent classes
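The reworked _get_keys()/init_get() pair builds lookup keys from the most generic to the most specific, and later matches overwrite earlier ones per form type, so the most specific definition wins. A hedged sketch of the behaviour the DefinitionOrderTC test above exercises, with keys taken from that test:

    from cubicweb.web import uicfg

    # a wildcard rule, then a more specific rule for the CWUser login attribute
    uicfg.autoform_section.tag_subject_of(('*', 'login', '*'),
                                          formtype='main', section='hidden')
    uicfg.autoform_section.tag_subject_of(('CWUser', 'login', 'String'),
                                          formtype='main', section='inlined')
    # once the tags are initialized against the schema, the specific rule wins:
    # autoform_section.get('CWUser', 'login', 'String', 'subject') is expected
    # to contain 'main_inlined' rather than 'main_hidden'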
+ """ + entity = self._entity() + if isinstance(self.peid, int): + pentity = self._cw.entity_from_eid(self.peid) + petype = pentity.e_schema.type + rdef = entity.e_schema.rdef(self.rtype, neg_role(self.role), petype) + card= rdef.role_cardinality(self.role) + if card == '1': # don't display remove link + return None + return self.removejs and self.removejs % ( + self.peid, self.rtype, entity.eid) + def render_form(self, i18nctx, **kwargs): """fetch and render the form""" entity = self._entity() divid = '%s-%s-%s' % (self.peid, self.rtype, entity.eid) title = self.form_title(entity, i18nctx) - removejs = self.removejs and self.removejs % ( - self.peid, self.rtype, entity.eid) + removejs = self._get_removejs() countkey = '%s_count' % self.rtype try: self._cw.data[countkey] += 1 diff -r 60880c81e32e -r 9db65b381028 web/views/basetemplates.py --- a/web/views/basetemplates.py Wed Jun 30 15:43:36 2010 +0200 +++ b/web/views/basetemplates.py Thu Jul 01 17:06:37 2010 +0200 @@ -483,7 +483,7 @@ if cw.vreg.config['allow-email-login']: label = cw._('login or email') else: - label = cw._('login') + label = cw.pgettext('CWUser', 'login') form.field_by_name('__login').label = label self.w(form.render(table_class='', display_progress_div=False)) cw.html_headers.add_onload('jQuery("#__login:visible").focus()') diff -r 60880c81e32e -r 9db65b381028 web/views/boxes.py --- a/web/views/boxes.py Wed Jun 30 15:43:36 2010 +0200 +++ b/web/views/boxes.py Thu Jul 01 17:06:37 2010 +0200 @@ -15,8 +15,7 @@ # # You should have received a copy of the GNU Lesser General Public License along # with CubicWeb. If not, see . -""" -generic boxes for CubicWeb web client: +"""Generic boxes for CubicWeb web client: * actions box * possible views box @@ -24,8 +23,8 @@ additional (disabled by default) boxes * schema box * startup views box +""" -""" __docformat__ = "restructuredtext en" _ = unicode @@ -185,7 +184,6 @@ for view in self._cw.vreg['views'].possible_views(self._cw, None): if view.category == 'startupview': box.append(self.box_action(view)) - if not box.is_empty(): box.render(self.w) diff -r 60880c81e32e -r 9db65b381028 web/views/debug.py --- a/web/views/debug.py Wed Jun 30 15:43:36 2010 +0200 +++ b/web/views/debug.py Thu Jul 01 17:06:37 2010 +0200 @@ -15,10 +15,8 @@ # # You should have received a copy of the GNU Lesser General Public License along # with CubicWeb. If not, see . -"""management and error screens +"""management and error screens""" - -""" __docformat__ = "restructuredtext en" from time import strftime, localtime @@ -45,6 +43,7 @@ __select__ = none_rset() & match_user_groups('managers') title = _('server information') + cache_max_age = 0 def call(self, **kwargs): req = self._cw @@ -128,6 +127,7 @@ __regid__ = 'registry' __select__ = StartupView.__select__ & match_user_groups('managers') title = _('registry') + cache_max_age = 0 def call(self, **kwargs): self.w(u'

%s

' % _("Registry's content")) @@ -150,6 +150,7 @@ __regid__ = 'gc' __select__ = StartupView.__select__ & match_user_groups('managers') title = _('memory leak debugging') + cache_max_age = 0 def call(self, **kwargs): from cubicweb._gcdebug import gc_info diff -r 60880c81e32e -r 9db65b381028 web/views/formrenderers.py --- a/web/views/formrenderers.py Wed Jun 30 15:43:36 2010 +0200 +++ b/web/views/formrenderers.py Thu Jul 01 17:06:37 2010 +0200 @@ -397,10 +397,6 @@ _options = FormRenderer._options + ('main_form_title',) main_form_title = _('main informations') - def render(self, form, values): - rendered = super(EntityFormRenderer, self).render(form, values) - return rendered + u'' # close extra div introducted by open_form - def open_form(self, form, values): attrs_fs_label = '' if self.main_form_title: @@ -409,6 +405,13 @@ attrs_fs_label += '
' return attrs_fs_label + super(EntityFormRenderer, self).open_form(form, values) + def close_form(self, form, values): + """seems dumb but important for consistency w/ close form, and necessary + for form renderers overriding open_form to use something else or more than + and
+ """ + return super(EntityFormRenderer, self).close_form(form, values) + '
' + def render_buttons(self, w, form): if len(form.form_buttons) == 3: w(""" diff -r 60880c81e32e -r 9db65b381028 web/views/startup.py --- a/web/views/startup.py Wed Jun 30 15:43:36 2010 +0200 +++ b/web/views/startup.py Thu Jul 01 17:06:37 2010 +0200 @@ -17,8 +17,8 @@ # with CubicWeb. If not, see . """Set of HTML startup views. A startup view is global, e.g. doesn't apply to a result set. +""" -""" __docformat__ = "restructuredtext en" _ = unicode
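The close_form() addition keeps the div opened by open_form() paired inside the renderer itself instead of being closed by the removed render() override. A renderer that opens extra markup can now keep that pairing symmetrical; a minimal sketch, with the class name and wrapper markup invented:

    from cubicweb.web.views.formrenderers import EntityFormRenderer

    class MyFormRenderer(EntityFormRenderer):
        """hypothetical subclass opening one extra wrapper"""
        __regid__ = 'myrenderer'

        def open_form(self, form, values):
            return ('<div class="my-wrapper">'
                    + super(MyFormRenderer, self).open_form(form, values))

        def close_form(self, form, values):
            # close the wrapper in the symmetrical place
            return (super(MyFormRenderer, self).close_form(form, values)
                    + '</div>')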