--- a/.hgtags Fri Jun 18 18:31:22 2010 +0200
+++ b/.hgtags Mon Jun 21 13:23:11 2010 +0200
@@ -131,3 +131,5 @@
1ccaa924786047be66b44f6dbc76e6631f56b04a cubicweb-debian-version-3.8.3-1
d00d1fab42afec8607fc84d862becfd7f58850f1 cubicweb-version-3.8.4
b7883287f40c853e8278edc3f24326f2c9549954 cubicweb-debian-version-3.8.4-1
+2de32c0c293ba451b231efe77d6027376af3a2a3 cubicweb-version-3.8.5
+5d05b08adeab1ea301e49ed8537e35ede6db92f6 cubicweb-debian-version-3.8.5-1
--- a/__pkginfo__.py Fri Jun 18 18:31:22 2010 +0200
+++ b/__pkginfo__.py Mon Jun 21 13:23:11 2010 +0200
@@ -22,7 +22,7 @@
modname = distname = "cubicweb"
-numversion = (3, 8, 4)
+numversion = (3, 8, 5)
version = '.'.join(str(num) for num in numversion)
description = "a repository of entities / relations for knowledge management"
@@ -43,7 +43,7 @@
'logilab-common': '>= 0.50.2',
'logilab-mtconverter': '>= 0.8.0',
'rql': '>= 0.26.2',
- 'yams': '>= 0.30.0',
+ 'yams': '>= 0.29.1',
'docutils': '>= 0.6',
#gettext # for xgettext, msgcat, etc...
# web dependancies
--- a/cwctl.py Fri Jun 18 18:31:22 2010 +0200
+++ b/cwctl.py Mon Jun 21 13:23:11 2010 +0200
@@ -730,7 +730,9 @@
if cubicwebversion > applcubicwebversion:
toupgrade.append(('cubicweb', applcubicwebversion, cubicwebversion))
if not self.config.fs_only and not toupgrade:
- print '-> no software migration needed for instance %s.' % appid
+ print '-> no data migration needed for instance %s.' % appid
+ self.i18nupgrade(config)
+ mih.shutdown()
return
for cube, fromversion, toversion in toupgrade:
print '-> migration needed from %s to %s for %s' % (fromversion, toversion, cube)
@@ -741,6 +743,22 @@
mih.migrate(vcconf, reversed(toupgrade), self.config)
# rewrite main configuration file
mih.rewrite_configuration()
+ mih.shutdown()
+ # handle i18n upgrade
+ if not self.i18nupgrade(config):
+ return
+ print
+ print '-> instance migrated.'
+ if not (CWDEV or self.config.nostartstop):
+ # restart instance through fork to get a proper environment, avoid
+ # uicfg pb (and probably gettext catalogs, to check...)
+ forkcmd = '%s start %s' % (sys.argv[0], appid)
+ status = system(forkcmd)
+ if status:
+ print '%s exited with status %s' % (forkcmd, status)
+ print
+
+ def i18nupgrade(self, config):
# handle i18n upgrade:
# * install new languages
# * recompile catalogs
@@ -752,21 +770,10 @@
if errors:
print '\n'.join(errors)
if not ASK.confirm('Error while compiling message catalogs, '
- 'continue anyway ?'):
+ 'continue anyway?'):
print '-> migration not completed.'
- return
- mih.shutdown()
- print
- print '-> instance migrated.'
- if not (CWDEV or self.config.nostartstop):
- # restart instance through fork to get a proper environment, avoid
- # uicfg pb (and probably gettext catalogs, to check...)
- forkcmd = '%s start %s' % (sys.argv[0], appid)
- status = system(forkcmd)
- if status:
- print '%s exited with status %s' % (forkcmd, status)
- print
-
+ return False
+ return True
class ShellCommand(Command):
"""Run an interactive migration shell on an instance. This is a python shell
--- a/debian/changelog Fri Jun 18 18:31:22 2010 +0200
+++ b/debian/changelog Mon Jun 21 13:23:11 2010 +0200
@@ -1,3 +1,9 @@
+cubicweb (3.8.5-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Sylvain Thénault <sylvain.thenault@logilab.fr> Mon, 21 Jun 2010 10:42:01 +0200
+
cubicweb (3.8.4-1) unstable; urgency=low
* new upstream release
--- a/debian/control Fri Jun 18 18:31:22 2010 +0200
+++ b/debian/control Mon Jun 21 13:23:11 2010 +0200
@@ -97,7 +97,7 @@
Package: cubicweb-common
Architecture: all
XB-Python-Version: ${python:Versions}
-Depends: ${python:Depends}, graphviz, gettext, python-logilab-mtconverter (>= 0.8.0), python-logilab-common (>= 0.50.2), python-yams (>= 0.30.0), python-rql (>= 0.26.2), python-lxml
+Depends: ${python:Depends}, graphviz, gettext, python-logilab-mtconverter (>= 0.8.0), python-logilab-common (>= 0.50.2), python-yams (>= 0.29.1), python-rql (>= 0.26.2), python-lxml
Recommends: python-simpletal (>= 4.0), python-crypto
Conflicts: cubicweb-core
Replaces: cubicweb-core
--- a/debian/cubicweb-ctl.logrotate Fri Jun 18 18:31:22 2010 +0200
+++ b/debian/cubicweb-ctl.logrotate Mon Jun 21 13:23:11 2010 +0200
@@ -9,9 +9,9 @@
sharedscripts
postrotate
if [ -x /usr/sbin/invoke-rc.d ]; then \
- invoke-rc.d cubicweb reload > /dev/null; \
+            invoke-rc.d cubicweb reload > /dev/null 2>&1; \
else \
- /etc/init.d/cubicweb reload > /dev/null; \
+            /etc/init.d/cubicweb reload > /dev/null 2>&1; \
fi; \
endscript
}
--- a/devtools/fill.py Fri Jun 18 18:31:22 2010 +0200
+++ b/devtools/fill.py Mon Jun 21 13:23:11 2010 +0200
@@ -16,9 +16,8 @@
#
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
-"""This modules defines func / methods for creating test repositories
+"""This modules defines func / methods for creating test repositories"""
-"""
__docformat__ = "restructuredtext en"
from random import randint, choice
--- a/devtools/testlib.py Fri Jun 18 18:31:22 2010 +0200
+++ b/devtools/testlib.py Mon Jun 21 13:23:11 2010 +0200
@@ -218,7 +218,10 @@
if not 'repo' in cls.__dict__:
cls._build_repo()
else:
- cls.cnx.rollback()
+ try:
+ cls.cnx.rollback()
+ except ProgrammingError:
+ pass
cls._refresh_repo()
@classmethod
@@ -280,6 +283,8 @@
MAILBOX[:] = [] # reset mailbox
def tearDown(self):
+ if not self.cnx._closed:
+ self.cnx.rollback()
for cnx in self._cnxs:
if not cnx._closed:
cnx.close()
@@ -330,11 +335,12 @@
def restore_connection(self):
if not self.cnx is self._orig_cnx[0]:
+ if not self.cnx._closed:
+ self.cnx.close()
try:
- self.cnx.close()
self._cnxs.remove(self.cnx)
- except ProgrammingError:
- pass # already closed
+ except ValueError:
+ pass
self.cnx, self.websession = self._orig_cnx
# db api ##################################################################
@@ -366,7 +372,7 @@
try:
self.cnx.rollback()
except ProgrammingError:
- pass
+ pass # connection closed
finally:
self.session.set_pool() # ensure pool still set after commit
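
The tearDown / restore_connection changes above share one defensive pattern: only touch a connection that is still open, and treat the ProgrammingError raised by an already-closed DB-API connection as harmless. A self-contained illustration of that behaviour using the stdlib sqlite3 module (not the cubicweb test API, just the generic DB-API contract the tests rely on):

import sqlite3

cnx = sqlite3.connect(':memory:')
cnx.close()
try:
    cnx.rollback()
except sqlite3.ProgrammingError:
    # "Cannot operate on a closed database": nothing left to roll back
    pass
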
--- a/doc/book/en/annexes/faq.rst Fri Jun 18 18:31:22 2010 +0200
+++ b/doc/book/en/annexes/faq.rst Mon Jun 21 13:23:11 2010 +0200
@@ -463,6 +463,13 @@
You can find additional information in the section :ref:`securitymodel`.
+Is it possible to bypass security from the UI (web front) part ?
+----------------------------------------------------------------
+
+No.
+
+Only hooks and operations, which run on the repository side, can bypass security.
+
Can PostgreSQL and CubicWeb authentication work with kerberos ?
----------------------------------------------------------------
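
To illustrate the answer above: security can only be relaxed on the repository side, typically from a hook or an operation. A minimal sketch, assuming the security_enabled context manager from cubicweb.server.session is available in this series; the entity type and hook id are made up for the example:

from __future__ import with_statement  # needed on Python 2.5
from cubicweb.server import hook
from cubicweb.server.session import security_enabled  # assumed available here
from cubicweb.selectors import implements

class SetDefaultOwnerHook(hook.Hook):
    """hypothetical hook relaxing write security on the repository side"""
    __regid__ = 'myapp.set_default_owner'                     # made-up id
    __select__ = hook.Hook.__select__ & implements('Ticket')  # made-up etype
    events = ('after_add_entity',)

    def __call__(self):
        # write permission checks are disabled only within this block
        with security_enabled(self._cw, write=False):
            self._cw.execute('SET X owned_by U WHERE X eid %(x)s, U login "admin"',
                             {'x': self.entity.eid})
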
--- a/hooks/metadata.py Fri Jun 18 18:31:22 2010 +0200
+++ b/hooks/metadata.py Mon Jun 21 13:23:11 2010 +0200
@@ -15,12 +15,10 @@
#
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
-"""Core hooks: set generic metadata
+"""Core hooks: set generic metadata"""
-"""
__docformat__ = "restructuredtext en"
-
from datetime import datetime
from cubicweb.selectors import implements
--- a/hooks/syncschema.py Fri Jun 18 18:31:22 2010 +0200
+++ b/hooks/syncschema.py Mon Jun 21 13:23:11 2010 +0200
@@ -21,8 +21,8 @@
- perform physical update on the source when necessary
checking for schema consistency is done in hooks.py
+"""
-"""
__docformat__ = "restructuredtext en"
from copy import copy
@@ -84,7 +84,7 @@
table = SQL_PREFIX + etype
column = SQL_PREFIX + rtype
try:
- session.system_sql(str('ALTER TABLE %s ADD COLUMN %s integer'
+ session.system_sql(str('ALTER TABLE %s ADD %s integer'
% (table, column)), rollback_on_failure=False)
session.info('added column %s to table %s', column, table)
except:
@@ -145,15 +145,17 @@
table = column = None # make pylint happy
def precommit_event(self):
session, table, column = self.session, self.table, self.column
+ source = session.repo.system_source
# drop index if any
- session.pool.source('system').drop_index(session, table, column)
- try:
+ source.drop_index(session, table, column)
+ if source.dbhelper.alter_column_support:
session.system_sql('ALTER TABLE %s DROP COLUMN %s'
% (table, column), rollback_on_failure=False)
self.info('dropped column %s from table %s', column, table)
- except Exception, ex:
+ else:
# not supported by sqlite for instance
- self.error('error while altering table %s: %s', table, ex)
+ self.error('dropping column not supported by the backend, handle '
+ 'it yourself (%s.%s)', table, column)
# base operations for in-memory schema synchronization ########################
@@ -284,9 +286,10 @@
sqlexec('INSERT INTO %s_relation SELECT %s, %s FROM %s WHERE NOT %s IS NULL'
% (rtype, eidcolumn, column, table, column))
# drop existant columns
+ #if session.repo.system_source.dbhelper.alter_column_support:
for etype in rschema.subjects():
DropColumn(session, table=SQL_PREFIX + str(etype),
- column=SQL_PREFIX + rtype)
+ column=SQL_PREFIX + rtype)
else:
for etype in rschema.subjects():
try:
@@ -377,7 +380,7 @@
table = SQL_PREFIX + rdef.subject
column = SQL_PREFIX + rdef.name
try:
- session.system_sql(str('ALTER TABLE %s ADD COLUMN %s %s'
+ session.system_sql(str('ALTER TABLE %s ADD %s %s'
% (table, column, attrtype)),
rollback_on_failure=False)
self.info('added column %s to table %s', table, column)
@@ -552,8 +555,8 @@
sql = adbh.sql_change_col_type(table, column, coltype, card != '1')
try:
session.system_sql(sql, rollback_on_failure=False)
- self.info('altered column %s of table %s: now VARCHAR(%s)',
- column, table, newcstr.max)
+ self.info('altered column %s of table %s: now %s',
+ column, table, coltype)
except Exception, ex:
# not supported by sqlite for instance
self.error('error while altering table %s: %s', table, ex)
@@ -568,16 +571,19 @@
def precommit_event(self):
cstrtype = self.cstr.type()
- table = SQL_PREFIX + str(self.subjtype)
- column = SQL_PREFIX + str(self.rtype)
+ table = SQL_PREFIX + str(self.rdef.subject)
+ column = SQL_PREFIX + str(self.rdef.rtype)
# alter the physical schema on size/unique constraint changes
if cstrtype == 'SizeConstraint':
try:
- self.session.system_sql('ALTER TABLE %s ALTER COLUMN %s TYPE TEXT'
- % (table, column),
- rollback_on_failure=False)
- self.info('altered column %s of table %s: now TEXT',
- column, table)
+ adbh = self.session.pool.source('system').dbhelper
+            coltype = y2sql.type_from_constraints(adbh, self.rdef.object, [],
+ creating=False)
+ sql = adbh.sql_change_col_type(table, column, coltype,
+                                           self.rdef.cardinality != '1')
+ self.session.system_sql(sql, rollback_on_failure=False)
+ self.info('altered column %s of table %s: now %s',
+ column, table, coltype)
except Exception, ex:
# not supported by sqlite for instance
self.error('error while altering table %s: %s', table, ex)
@@ -1112,8 +1118,7 @@
except IndexError:
self._cw.critical('constraint type no more accessible')
else:
- SourceDbCWConstraintDel(self._cw, cstr=cstr,
- subjtype=rdef.subject, rtype=rdef.rtype)
+ SourceDbCWConstraintDel(self._cw, rdef=rdef, cstr=cstr)
MemSchemaCWConstraintDel(self._cw, rdef=rdef, cstr=cstr)
--- a/schemas/bootstrap.py Fri Jun 18 18:31:22 2010 +0200
+++ b/schemas/bootstrap.py Mon Jun 21 13:23:11 2010 +0200
@@ -16,8 +16,8 @@
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
"""core CubicWeb schema necessary for bootstrapping the actual instance's schema
+"""
-"""
__docformat__ = "restructuredtext en"
_ = unicode
--- a/server/mssteps.py Fri Jun 18 18:31:22 2010 +0200
+++ b/server/mssteps.py Mon Jun 21 13:23:11 2010 +0200
@@ -21,8 +21,8 @@
* get data from the parent plan, the latest step, temporary table...
* each step has is own members (this is not necessarily bad, but a bit messy
for now)
+"""
-"""
__docformat__ = "restructuredtext en"
from rql.nodes import VariableRef, Variable, Function
@@ -37,11 +37,11 @@
for select in union.children:
if keepgroup:
having, orderby = select.having, select.orderby
- select.having, select.orderby = None, None
+ select.having, select.orderby = (), ()
clauses.append( (having, orderby) )
else:
groupby, having, orderby = select.groupby, select.having, select.orderby
- select.groupby, select.having, select.orderby = None, None, None
+ select.groupby, select.having, select.orderby = (), (), ()
clauses.append( (groupby, having, orderby) )
return clauses
--- a/server/repository.py Fri Jun 18 18:31:22 2010 +0200
+++ b/server/repository.py Mon Jun 21 13:23:11 2010 +0200
@@ -1095,8 +1095,6 @@
orig_edited_attributes = getattr(entity, 'edited_attributes', None)
entity.edited_attributes = edited_attributes
try:
- if session.is_hook_category_activated('integrity'):
- entity._cw_check()
only_inline_rels, need_fti_update = True, False
relations = []
source = self.source_from_eid(entity.eid, session)
@@ -1127,6 +1125,8 @@
eidfrom=entity.eid, rtype=attr, eidto=value)
if not only_inline_rels:
hm.call_hooks('before_update_entity', session, entity=entity)
+ if session.is_hook_category_activated('integrity'):
+ entity._cw_check()
source.update_entity(session, entity)
self.system_source.update_info(session, entity, need_fti_update)
if source.should_call_hooks:
--- a/server/session.py Fri Jun 18 18:31:22 2010 +0200
+++ b/server/session.py Mon Jun 21 13:23:11 2010 +0200
@@ -131,8 +131,7 @@
self.user = user
self.repo = repo
self.cnxtype = cnxprops.cnxtype
- self.creation = time()
- self.timestamp = self.creation
+ self.timestamp = time()
self.default_mode = 'read'
# support undo for Create Update Delete entity / Add Remove relation
if repo.config.creating or repo.config.repairing or self.is_internal_session:
@@ -286,8 +285,10 @@
try:
return source.doexec(self, sql, args, rollback=rollback_on_failure)
except (source.OperationalError, source.InterfaceError):
+ if not rollback_on_failure:
+ raise
source.warning("trying to reconnect")
- self.pool.reconnect(self)
+ self.pool.reconnect(source)
return source.doexec(self, sql, args, rollback=rollback_on_failure)
def set_language(self, language):
@@ -642,6 +643,7 @@
if eid_key is not None:
warn('[3.8] eid_key is deprecated, you can safely remove this argument',
DeprecationWarning, stacklevel=2)
+ self.timestamp = time() # update timestamp
rset = self._execute(self, rql, kwargs, build_descr)
rset.req = self
return rset
--- a/server/sources/ldapuser.py Fri Jun 18 18:31:22 2010 +0200
+++ b/server/sources/ldapuser.py Mon Jun 21 13:23:11 2010 +0200
@@ -242,7 +242,7 @@
if emailaddr == ldapemailaddr:
break
else:
- self.info('updating email address of user %s to %s',
+ self.debug('updating email address of user %s to %s',
extid, ldapemailaddr)
emailrset = execute('EmailAddress A WHERE A address %(addr)s',
{'addr': ldapemailaddr})
@@ -504,7 +504,7 @@
def _search(self, session, base, scope,
searchstr='(objectClass=*)', attrs=()):
"""make an ldap query"""
- self.info('ldap search %s %s %s %s %s', self.uri, base, scope, searchstr, list(attrs))
+ self.debug('ldap search %s %s %s %s %s', self.uri, base, scope, searchstr, list(attrs))
cnx = session.pool.connection(self.uri).cnx
try:
res = cnx.search_s(base, scope, searchstr, attrs)
@@ -520,12 +520,12 @@
self.repo.delete_info(session, entity, self.uri, base)
self.reset_cache()
return []
-## except ldap.REFERRAL, e:
-## cnx = self.handle_referral(e)
-## try:
-## res = cnx.search_s(base, scope, searchstr, attrs)
-## except ldap.PARTIAL_RESULTS:
-## res_type, res = cnx.result(all=0)
+ # except ldap.REFERRAL, e:
+ # cnx = self.handle_referral(e)
+ # try:
+ # res = cnx.search_s(base, scope, searchstr, attrs)
+ # except ldap.PARTIAL_RESULTS:
+ # res_type, res = cnx.result(all=0)
result = []
for rec_dn, rec_dict in res:
# When used against Active Directory, "rec_dict" may not be
@@ -553,7 +553,7 @@
self._cache[rec_dn] = rec_dict
result.append(rec_dict)
#print '--->', result
- self.info('ldap built results %s', result)
+ self.info('ldap built results %s', len(result))
return result
def before_entity_insertion(self, session, lid, etype, eid):
@@ -564,7 +564,7 @@
This method must return the an Entity instance representation of this
entity.
"""
- self.info('ldap before entity insertion')
+ self.debug('ldap before entity insertion')
entity = super(LDAPUserSource, self).before_entity_insertion(session, lid, etype, eid)
res = self._search(session, lid, BASE)[0]
for attr in entity.e_schema.indexable_attributes():
@@ -575,7 +575,7 @@
"""called by the repository after an entity stored here has been
inserted in the system table.
"""
- self.info('ldap after entity insertion')
+ self.debug('ldap after entity insertion')
super(LDAPUserSource, self).after_entity_insertion(session, dn, entity)
for group in self.user_default_groups:
session.execute('SET X in_group G WHERE X eid %(x)s, G name %(group)s',
--- a/server/sources/rql2sql.py Fri Jun 18 18:31:22 2010 +0200
+++ b/server/sources/rql2sql.py Mon Jun 21 13:23:11 2010 +0200
@@ -45,9 +45,8 @@
and Informix.
.. _Comparison of different SQL implementations: http://www.troels.arvin.dk/db/rdbms
-
+"""
-"""
__docformat__ = "restructuredtext en"
import threading
@@ -56,8 +55,8 @@
from rql import BadRQLQuery, CoercionError
from rql.stmts import Union, Select
-from rql.nodes import (SortTerm, VariableRef, Constant, Function, Not,
- Variable, ColumnAlias, Relation, SubQuery, Exists)
+from rql.nodes import (SortTerm, VariableRef, Constant, Function, Variable, Or,
+ Not, Comparison, ColumnAlias, Relation, SubQuery, Exists)
from cubicweb import QueryError
from cubicweb.server.sqlutils import SQL_PREFIX
@@ -397,6 +396,49 @@
self.restrictions = self._restr_stack.pop()
return restrictions, self.actual_tables.pop()
+def extract_fake_having_terms(having):
+ """RQL's HAVING may be used to contains stuff that should go in the WHERE
+ clause of the SQL query, due to RQL grammar limitation. Split them...
+
+ Return a list nodes that can be ANDed with query's WHERE clause. Having
+ subtrees updated in place.
+ """
+ fakehaving = []
+ for subtree in having:
+ ors, tocheck = set(), []
+ for compnode in subtree.get_nodes(Comparison):
+ for fnode in compnode.get_nodes(Function):
+ if fnode.descr().aggregat:
+ p = compnode.parent
+ oor = None
+ while not isinstance(p, Select):
+ if isinstance(p, Or):
+ oor = p
+ p = p.parent
+ if oor is not None:
+ ors.add(oor)
+ break
+ else:
+ tocheck.append(compnode)
+        # tocheck holds the comparisons that don't involve an aggregate function;
+        # put them in fakehaving if they don't share an Or node as ancestor
+        # with another comparison that does contain an aggregate function
+ for compnode in tocheck:
+ parents = set()
+ p = compnode.parent
+ oor = None
+ while not isinstance(p, Select):
+ if p in ors:
+ break
+ if isinstance(p, Or):
+ oor = p
+ p = p.parent
+ else:
+ node = oor or compnode
+ if not node in fakehaving:
+ fakehaving.append(node)
+ compnode.parent.remove(node)
+ return fakehaving
class SQLGenerator(object):
"""
@@ -494,6 +536,7 @@
sorts = select.orderby
groups = select.groupby
having = select.having
+ morerestr = extract_fake_having_terms(having)
# remember selection, it may be changed and have to be restored
origselection = select.selection[:]
# check if the query will have union subquery, if it need sort term
@@ -545,7 +588,8 @@
self._in_wrapping_query = False
self._state = state
try:
- sql = self._solutions_sql(select, sols, distinct, needalias or needwrap)
+ sql = self._solutions_sql(select, morerestr, sols, distinct,
+ needalias or needwrap)
# generate groups / having before wrapping query selection to
# get correct column aliases
self._in_wrapping_query = needwrap
@@ -610,13 +654,15 @@
except KeyError:
continue
- def _solutions_sql(self, select, solutions, distinct, needalias):
+ def _solutions_sql(self, select, morerestr, solutions, distinct, needalias):
sqls = []
for solution in solutions:
self._state.reset(solution)
# visit restriction subtree
if select.where is not None:
self._state.add_restriction(select.where.accept(self))
+ for restriction in morerestr:
+ self._state.add_restriction(restriction.accept(self))
sql = [self._selection_sql(select.selection, distinct, needalias)]
if self._state.restrictions:
sql.append('WHERE %s' % ' AND '.join(self._state.restrictions))
@@ -1055,7 +1101,8 @@
operator = mexpr.operator
try:
if mexpr.operator == '+' and mexpr.get_type(self._state.solution, self._args) == 'String':
- operator = '||'
+ return '(%s)' % self.dbhelper.sql_concat_string(lhs.accept(self),
+ rhs.accept(self))
except CoercionError:
pass
return '(%s %s %s)'% (lhs.accept(self), operator, rhs.accept(self))
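
The extract_fake_having_terms helper added above is easiest to follow on a concrete query (the query mirrors the tests added below; this is a sketch of the intended behaviour, not part of the patch):

from rql import parse
from cubicweb.server.sources.rql2sql import extract_fake_having_terms

select = parse('Any L WHERE X login L, X creation_date CD '
               'HAVING YEAR(CD) = 2010').children[0]
# YEAR() is not an aggregate function, so this HAVING term is a 'fake' one:
# the helper should return it (pruning it from the HAVING subtree) so that
# _solutions_sql can AND it into the generated SQL WHERE clause instead.
fake = extract_fake_having_terms(select.having)
print fake
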
--- a/server/test/unittest_querier.py Fri Jun 18 18:31:22 2010 +0200
+++ b/server/test/unittest_querier.py Mon Jun 21 13:23:11 2010 +0200
@@ -510,6 +510,21 @@
self.assertEquals(len(rset.rows), 1)
self.assertEquals(rset.rows[0][0], self.ueid)
+ def test_select_having_non_aggregat_1(self):
+ rset = self.execute('Any L WHERE X login L, X creation_date CD '
+ 'HAVING YEAR(CD) = %s' % date.today().year)
+ self.assertListEquals(rset.rows,
+ [[u'admin'],
+ [u'anon']])
+
+ def test_select_having_non_aggregat_2(self):
+ rset = self.execute('Any L GROUPBY L WHERE X login L, X in_group G, '
+ 'X creation_date CD HAVING YEAR(CD) = %s OR COUNT(G) > 1'
+ % date.today().year)
+ self.assertListEquals(rset.rows,
+ [[u'admin'],
+ [u'anon']])
+
def test_select_complex_sort(self):
"""need sqlite including http://www.sqlite.org/cvstrac/tktview?tn=3773 fix"""
rset = self.execute('Any X ORDERBY X,D LIMIT 5 WHERE X creation_date D')
--- a/server/test/unittest_rql2sql.py Fri Jun 18 18:31:22 2010 +0200
+++ b/server/test/unittest_rql2sql.py Mon Jun 21 13:23:11 2010 +0200
@@ -1443,6 +1443,12 @@
self.o.attr_map.clear()
+ def test_concat_string(self):
+ self._check('Any "A"+R WHERE X ref R',
+ '''SELECT (A || _X.cw_ref)
+FROM cw_Affaire AS _X''')
+
+
class SqliteSQLGeneratorTC(PostgresSQLGeneratorTC):
backend = 'sqlite'
--- a/web/request.py Fri Jun 18 18:31:22 2010 +0200
+++ b/web/request.py Mon Jun 21 13:23:11 2010 +0200
@@ -20,7 +20,12 @@
__docformat__ = "restructuredtext en"
import Cookie
-import hashlib
+try:
+ import hashlib
+ hashlib.hash
+except AttributeError:
+ # python 2.5 ...
+ import sha as hashlib
import time
import random
import base64
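
The guard above is meant to fall back to the old sha module when the available hashlib is not usable; for reference, the more common pre/post-2.5 compatibility idiom looks like this (not the author's code, just the usual pattern):

try:
    from hashlib import sha1        # Python >= 2.5
except ImportError:
    from sha import new as sha1     # pre-hashlib fallback
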
--- a/web/test/unittest_session.py Fri Jun 18 18:31:22 2010 +0200
+++ b/web/test/unittest_session.py Mon Jun 21 13:23:11 2010 +0200
@@ -19,14 +19,18 @@
self.assertEquals(self.websession.sessionid, self.websession.cnx.sessionid)
# fake the repo session is expiring
self.repo.close(sessionid)
- # fake an incoming http query with sessionid in session cookie
- # don't use self.request() which try to call req.set_session
- req = self.requestcls(self.vreg)
- websession = sm.get_session(req, sessionid)
- self.assertEquals(len(sm._sessions), 1)
- self.assertIs(websession, self.websession)
- self.assertEquals(websession.sessionid, sessionid)
- self.assertNotEquals(websession.sessionid, websession.cnx.sessionid)
+ try:
+ # fake an incoming http query with sessionid in session cookie
+            # don't use self.request() which tries to call req.set_session
+ req = self.requestcls(self.vreg)
+ websession = sm.get_session(req, sessionid)
+ self.assertEquals(len(sm._sessions), 1)
+ self.assertIs(websession, self.websession)
+ self.assertEquals(websession.sessionid, sessionid)
+ self.assertNotEquals(websession.sessionid, websession.cnx.sessionid)
+ finally:
+            # avoid an error in tearDown by marking this connection as closed...
+ self.cnx._closed = True
if __name__ == '__main__':
from logilab.common.testlib import unittest_main
--- a/web/views/actions.py Fri Jun 18 18:31:22 2010 +0200
+++ b/web/views/actions.py Mon Jun 21 13:23:11 2010 +0200
@@ -275,11 +275,11 @@
continue
if role == 'subject':
label = 'add %s %s %s %s' % (eschema, rschema, teschema, role)
- url = self.linkto_url(entity, rschema, teschema, 'object')
+ url = self.linkto_url(entity, rschema, teschema, 'object', **params)
else:
label = 'add %s %s %s %s' % (teschema, rschema, eschema, role)
- url = self.linkto_url(entity, rschema, teschema, 'subject')
- yield self.build_action(self._cw._(label), url, **params)
+ url = self.linkto_url(entity, rschema, teschema, 'subject', **params)
+ yield self.build_action(self._cw._(label), url)
def add_related_schemas(self, entity):
"""this is actually used ui method to generate 'addrelated' actions from
--- a/web/views/schema.py Fri Jun 18 18:31:22 2010 +0200
+++ b/web/views/schema.py Mon Jun 21 13:23:11 2010 +0200
@@ -575,7 +575,7 @@
entity = self.cw_rset.get_entity(row, col)
rschema = self._cw.vreg.schema.rschema(entity.rtype.name)
rdef = rschema.rdefs[(entity.stype.name, entity.otype.name)]
- constraints = [xml_escape(str(c)) for c in getattr(rdef, 'constraints')]
+ constraints = [xml_escape(unicode(c)) for c in getattr(rdef, 'constraints')]
self.w(u'<br/>'.join(constraints))
class CWAttributeOptionsCell(EntityView):
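
The switch from str() to unicode() above matters because constraint descriptions may contain non-ASCII text (e.g. translated vocabulary or size messages); under Python 2, str() on such a value fails with the default ASCII codec:

>>> str(u'valeur maximale : 128 caractères')
Traceback (most recent call last):
  ...
UnicodeEncodeError: 'ascii' codec can't encode character u'\xe8' ...
>>> unicode(u'valeur maximale : 128 caractères')
u'valeur maximale : 128 caract\xe8res'
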
--- a/web/views/sessions.py Fri Jun 18 18:31:22 2010 +0200
+++ b/web/views/sessions.py Mon Jun 21 13:23:11 2010 +0200
@@ -82,7 +82,7 @@
"""close session on logout or on invalid session detected (expired out,
corrupted...)
"""
- self.info('closing http session %s' % session)
+ self.info('closing http session %s' % session.sessionid)
del self._sessions[session.sessionid]
try:
session.cnx.close()
--- a/web/views/urlrewrite.py Fri Jun 18 18:31:22 2010 +0200
+++ b/web/views/urlrewrite.py Mon Jun 21 13:23:11 2010 +0200
@@ -15,9 +15,8 @@
#
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
-"""Rules based url rewriter component, to get configurable RESTful urls.
+"""Rules based url rewriter component, to get configurable RESTful urls"""
-"""
import re
from cubicweb import typed_eid