--- a/__pkginfo__.py Mon Aug 10 09:58:40 2009 -0700
+++ b/__pkginfo__.py Tue Aug 11 17:19:05 2009 +0200
@@ -7,7 +7,7 @@
distname = "cubicweb"
modname = "cubicweb"
-numversion = (3, 4, 2)
+numversion = (3, 4, 3)
version = '.'.join(str(num) for num in numversion)
license = 'LGPL v2'
--- a/cwconfig.py Mon Aug 10 09:58:40 2009 -0700
+++ b/cwconfig.py Tue Aug 11 17:19:05 2009 +0200
@@ -316,7 +316,7 @@
return getattr(cls.cube_pkginfo(cube), '__recommend__', ())
@classmethod
- def expand_cubes(cls, cubes):
+ def expand_cubes(cls, cubes, with_recommends=False):
"""expand the given list of top level cubes used by adding recursivly
each cube dependencies
"""
@@ -329,6 +329,12 @@
depcube = CW_MIGRATION_MAP.get(depcube, depcube)
cubes.append(depcube)
todo.append(depcube)
+ if with_recommends:
+ for depcube in cls.cube_recommends(cube):
+ if depcube not in cubes:
+ depcube = CW_MIGRATION_MAP.get(depcube, depcube)
+ cubes.append(depcube)
+ todo.append(depcube)
return cubes
@classmethod
--- a/cwvreg.py Mon Aug 10 09:58:40 2009 -0700
+++ b/cwvreg.py Tue Aug 11 17:19:05 2009 +0200
@@ -250,8 +250,8 @@
def itervalues(self):
return (value for key, value in self.items())
- def reset(self):
- super(CubicWebVRegistry, self).reset()
+ def reset(self, path=None, force_reload=None):
+ super(CubicWebVRegistry, self).reset(path, force_reload)
self._needs_iface = {}
# two special registries, propertydefs which care all the property
# definitions, and propertyvals which contains values for those
@@ -260,13 +260,26 @@
self['propertyvalues'] = self.eprop_values = {}
for key, propdef in self.config.eproperty_definitions():
self.register_property(key, **propdef)
+ if path is not None and force_reload:
+ cleanup_sys_modules(path)
+ cubes = self.config.cubes()
+        # if the fs code uses some cubes not yet registered into the instance
+        # we should clean up sys.modules for those as well to avoid potential
+        # bad class reference problems after reloading
+ cfg = self.config
+ for cube in cfg.expand_cubes(cubes, with_recommends=True):
+ if not cube in cubes:
+ cpath = cfg.build_vregistry_cube_path([cfg.cube_dir(cube)])
+ cleanup_sys_modules(cpath)
def set_schema(self, schema):
"""set instance'schema and load application objects"""
self.schema = schema
clear_cache(self, 'rqlhelper')
# now we can load application's web objects
- self.register_objects(self.config.vregistry_path())
+ searchpath = self.config.vregistry_path()
+ self.reset(searchpath, force_reload=False)
+ self.register_objects(searchpath, force_reload=False)
# map lowered entity type names to their actual name
self.case_insensitive_etypes = {}
for etype in self.schema.entities():
@@ -302,13 +315,14 @@
def register_objects(self, path, force_reload=None):
"""overriden to remove objects requiring a missing interface"""
+ if force_reload is None:
+ force_reload = self.config.mode == 'dev'
try:
self._register_objects(path, force_reload)
except RegistryOutOfDate:
CW_EVENT_MANAGER.emit('before-registry-reload')
# modification detected, reset and reload
- self.reset()
- cleanup_sys_modules(path)
+ self.reset(path, force_reload)
self._register_objects(path, force_reload)
CW_EVENT_MANAGER.emit('after-registry-reload')
--- a/server/__init__.py Mon Aug 10 09:58:40 2009 -0700
+++ b/server/__init__.py Tue Aug 11 17:19:05 2009 +0200
@@ -24,11 +24,12 @@
# server-side debugging #########################################################
# server debugging flags. They may be combined using binary operators.
-DBG_NONE = 0 # no debug information
-DBG_RQL = 1 # rql execution information
-DBG_SQL = 2 # executed sql
-DBG_REPO = 4 # repository events
-DBG_MORE = 8 # repository events
+DBG_NONE = 0 # no debug information
+DBG_RQL = 1 # rql execution information
+DBG_SQL = 2 # executed sql
+DBG_REPO = 4 # repository events
+DBG_MS = 8 # multi-sources
+DBG_MORE = 16 # more detailed debug information
# current debug mode
DEBUG = 0
--- a/server/migractions.py Mon Aug 10 09:58:40 2009 -0700
+++ b/server/migractions.py Tue Aug 11 17:19:05 2009 +0200
@@ -19,7 +19,10 @@
import sys
import os
-from os.path import join, exists
+import tarfile
+import tempfile
+import shutil
+import os.path as osp
from datetime import datetime
from logilab.common.deprecation import deprecated
@@ -110,25 +113,77 @@
def backup_database(self, backupfile=None, askconfirm=True):
config = self.config
repo = self.repo_connect()
+ # paths
timestamp = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
+ instbkdir = osp.join(config.appdatahome, 'backup')
+ if not osp.exists(instbkdir):
+ os.makedirs(instbkdir)
+ backupfile = backupfile or osp.join(instbkdir, '%s-%s.tar.gz'
+ % (config.appid, timestamp))
+ # check backup has to be done
+ if osp.exists(backupfile) and not \
+ self.confirm('Backup file %s exists, overwrite it?' % backupfile):
+ print '-> no backup done.'
+ return
+ elif askconfirm and not self.confirm('Backup %s database?' % config.appid):
+ print '-> no backup done.'
+ return
+ open(backupfile,'w').close() # kinda lock
+ os.chmod(backupfile, 0600)
+ # backup
+ tmpdir = tempfile.mkdtemp(dir=instbkdir)
for source in repo.sources:
- source.backup(self.confirm, backupfile, timestamp,
- askconfirm=askconfirm)
+ try:
+ source.backup(osp.join(tmpdir,source.uri))
+ except Exception, exc:
+ print '-> error trying to backup [%s]' % exc
+ if not self.confirm('Continue anyway?', default='n'):
+ raise SystemExit(1)
+ bkup = tarfile.open(backupfile, 'w|gz')
+ for filename in os.listdir(tmpdir):
+ bkup.add(osp.join(tmpdir,filename), filename)
+ bkup.close()
+ shutil.rmtree(tmpdir)
+ # call hooks
repo.hm.call_hooks('server_backup', repo=repo, timestamp=timestamp)
+ # done
+ print '-> backup file', backupfile
def restore_database(self, backupfile, drop=True, systemonly=True,
askconfirm=True):
config = self.config
repo = self.repo_connect()
+ # check
+ if not osp.exists(backupfile):
+ raise Exception("Backup file %s doesn't exist" % backupfile)
+ return
+ if askconfirm and not self.confirm('Restore %s database from %s ?'
+ % (config.appid, backupfile)):
+ return
+ # unpack backup
+ bkup = tarfile.open(backupfile, 'r|gz')
+ for name in bkup.getnames():
+ if name[0] in '/.':
+ raise Exception('Security check failed, path starts with "/" or "."')
+ bkup.close() # XXX seek error if not close+open !?!
+ bkup = tarfile.open(backupfile, 'r|gz')
+ tmpdir = tempfile.mkdtemp()
+ bkup.extractall(path=tmpdir)
if systemonly:
- repo.system_source.restore(self.confirm, backupfile=backupfile,
- drop=drop, askconfirm=askconfirm)
+ repo.system_source.restore(osp.join(tmpdir,'system'), drop=drop)
else:
- # in that case, backup file is expected to be a time stamp
for source in repo.sources:
- source.backup(self.confirm, timestamp=backupfile, drop=drop,
- askconfirm=askconfirm)
- repo.hm.call_hooks('server_restore', repo=repo, timestamp=backupfile)
+ try:
+ source.restore(osp.join(tmpdir, source.uri), drop=drop)
+ except Exception, exc:
+ print '-> error trying to restore [%s]' % exc
+ if not self.confirm('Continue anyway?', default='n'):
+ raise SystemExit(1)
+ bkup.close()
+ shutil.rmtree(tmpdir)
+ # call hooks
+ repo.hm.call_hooks('server_restore', repo=repo, timestamp=backupfile)
+ print '-> database restored.'
@property
def cnx(self):
@@ -213,10 +268,10 @@
def exec_event_script(self, event, cubepath=None, funcname=None,
*args, **kwargs):
if cubepath:
- apc = join(cubepath, 'migration', '%s.py' % event)
+ apc = osp.join(cubepath, 'migration', '%s.py' % event)
else:
- apc = join(self.config.migration_scripts_dir(), '%s.py' % event)
- if exists(apc):
+ apc = osp.join(self.config.migration_scripts_dir(), '%s.py' % event)
+ if osp.exists(apc):
if self.config.free_wheel:
from cubicweb.server.hooks import setowner_after_add_entity
self.repo.hm.unregister_hook(setowner_after_add_entity,
--- a/server/msplanner.py Mon Aug 10 09:58:40 2009 -0700
+++ b/server/msplanner.py Tue Aug 11 17:19:05 2009 +0200
@@ -267,7 +267,7 @@
self._conflicts = []
if rqlhelper is not None: # else test
self._insert_identity_variable = rqlhelper._annotator.rewrite_shared_optional
- if server.DEBUG:
+ if server.DEBUG & server.DBG_MS:
print 'sourcesterms:'
self._debug_sourcesterms()
@@ -1023,7 +1023,7 @@
the rqlst should not be tagged at this point
"""
- if server.DEBUG:
+ if server.DEBUG & server.DBG_MS:
print '-'*80
print 'PLANNING', rqlst
for select in rqlst.children:
@@ -1040,7 +1040,7 @@
ppis = [PartPlanInformation(plan, select, self.rqlhelper)
for select in rqlst.children]
steps = self._union_plan(plan, rqlst, ppis)
- if server.DEBUG:
+ if server.DEBUG & server.DBG_MS:
from pprint import pprint
for step in plan.steps:
pprint(step.test_repr())
@@ -1235,7 +1235,7 @@
return rqlst
def filter(self, sources, terms, rqlst, solindices, needsel, final):
- if server.DEBUG:
+ if server.DEBUG & server.DBG_MS:
print 'filter', final and 'final' or '', sources, terms, rqlst, solindices, needsel
newroot = Select()
self.sources = sorted(sources)
@@ -1329,7 +1329,7 @@
elif ored:
newroot.remove_node(rel)
add_types_restriction(self.schema, rqlst, newroot, solutions)
- if server.DEBUG:
+ if server.DEBUG & server.DBG_MS:
print '--->', newroot
return newroot, self.insertedvars
--- a/server/pool.py Mon Aug 10 09:58:40 2009 -0700
+++ b/server/pool.py Tue Aug 11 17:19:05 2009 +0200
@@ -33,6 +33,17 @@
self.source_cnxs['system'] = self.source_cnxs[sources[0].uri]
self._cursors = {}
+ def __getitem__(self, uri):
+        """subscription notation provides access to sources' cursors"""
+ try:
+ cursor = self._cursors[uri]
+ except KeyError:
+ cursor = self.source_cnxs[uri][1].cursor()
+ if cursor is not None:
+ # None possible on sources without cursor support such as ldap
+ self._cursors[uri] = cursor
+ return cursor
+
def commit(self):
"""commit the current transaction for this user"""
# FIXME: what happends if a commit fail
@@ -77,22 +88,11 @@
for source, cnx in self.source_cnxs.values():
source.pool_reset(cnx)
- def __getitem__(self, uri):
- """subscription notation provide access to sources'cursors"""
- try:
- cursor = self._cursors[uri]
- except KeyError:
- cursor = self.source_cnxs[uri][1].cursor()
- if cursor is not None:
- # None possible on sources without cursor support such as ldap
- self._cursors[uri] = cursor
- return cursor
-
def sources(self):
"""return the source objects handled by this pool"""
# implementation details of flying insert requires the system source
# first
- yield self.source_cnxs['system']
+ yield self.source_cnxs['system'][0]
for uri, (source, cursor) in self.source_cnxs.items():
if uri == 'system':
continue
@@ -107,11 +107,17 @@
"""return the connection on the source object with the given uri"""
return self.source_cnxs[uid][1]
- def reconnect(self, source):
- """reopen a connection for this source"""
- source.info('trying to reconnect')
- self.source_cnxs[source.uri] = (source, source.get_connection())
- del self._cursors[source.uri]
+ def reconnect(self, source=None):
+ """reopen a connection for this source or all sources if none specified
+ """
+ if source is None:
+ sources = self.sources()
+ else:
+ sources = (source,)
+ for source in sources:
+ source.info('trying to reconnect')
+ self.source_cnxs[source.uri] = (source, source.get_connection())
+ self._cursors.pop(source.uri, None)
def check_connections(self):
for source, cnx in self.source_cnxs.itervalues():
--- a/server/repository.py Mon Aug 10 09:58:40 2009 -0700
+++ b/server/repository.py Tue Aug 11 17:19:05 2009 +0200
@@ -1030,6 +1030,9 @@
if rtype in VIRTUAL_RTYPES:
continue
entity.set_related_cache(rtype, 'object', session.empty_rset())
+ # set inline relation cache before call to after_add_entity
+ for attr, value in relations:
+ session.update_rel_cache_add(entity.eid, attr, value)
# trigger after_add_entity after after_add_relation
if source.should_call_hooks:
self.hm.call_hooks('after_add_entity', etype, session, entity)
@@ -1037,7 +1040,6 @@
for attr, value in relations:
self.hm.call_hooks('before_add_relation', attr, session,
entity.eid, attr, value)
- session.update_rel_cache_add(entity.eid, attr, value)
self.hm.call_hooks('after_add_relation', attr, session,
entity.eid, attr, value)
return entity.eid
--- a/server/session.py Mon Aug 10 09:58:40 2009 -0700
+++ b/server/session.py Tue Aug 11 17:19:05 2009 +0200
@@ -208,9 +208,9 @@
"""connections pool, set according to transaction mode for each query"""
return getattr(self._threaddata, 'pool', None)
- def set_pool(self):
+ def set_pool(self, checkclosed=True):
"""the session need a pool to execute some queries"""
- if self._closed:
+ if checkclosed and self._closed:
raise Exception('try to set pool on a closed session')
if self.pool is None:
# get pool first to avoid race-condition
@@ -335,7 +335,7 @@
csession = ChildSession(self)
self._threaddata.childsession = csession
# need shared pool set
- self.set_pool()
+ self.set_pool(checkclosed=False)
return csession
def unsafe_execute(self, rql, kwargs=None, eid_key=None, build_descr=True,
--- a/server/sources/__init__.py Mon Aug 10 09:58:40 2009 -0700
+++ b/server/sources/__init__.py Tue Aug 11 17:19:05 2009 +0200
@@ -95,30 +95,11 @@
"""method called by the repository once ready to handle request"""
pass
- def backup_file(self, backupfile=None, timestamp=None):
- """return a unique file name for a source's dump
-
- either backupfile or timestamp (used to generated a backup file name if
- needed) should be specified.
- """
- if backupfile is None:
- config = self.repo.config
- return join(config.appdatahome, 'backup',
- '%s-%s-%s.dump' % (config.appid, timestamp, self.uri))
- # backup file is the system database backup file, add uri to it if not
- # already there
- base, ext = splitext(backupfile)
- if not base.endswith('-%s' % self.uri):
- return '%s-%s%s' % (base, self.uri, ext)
- return backupfile
-
- def backup(self, confirm, backupfile=None, timestamp=None,
- askconfirm=False):
+ def backup(self, backupfile):
"""method called to create a backup of source's data"""
pass
- def restore(self, confirm, backupfile=None, timestamp=None, drop=True,
- askconfirm=False):
+ def restore(self, backupfile):
"""method called to restore a backup of source's data"""
pass
--- a/server/sources/extlite.py Mon Aug 10 09:58:40 2009 -0700
+++ b/server/sources/extlite.py Tue Aug 11 17:19:05 2009 +0200
@@ -11,8 +11,7 @@
from os.path import join, exists
from cubicweb import server
-from cubicweb.server.sqlutils import (SQL_PREFIX, SQLAdapterMixIn, sqlexec,
- sql_source_backup, sql_source_restore)
+from cubicweb.server.sqlutils import SQL_PREFIX, SQLAdapterMixIn, sqlexec
from cubicweb.server.sources import native, rql2sql
from cubicweb.server.sources import AbstractSource, dbg_st_search, dbg_results
@@ -36,13 +35,13 @@
def commit(self):
if self._cnx is not None:
- if server.DEBUG & server.DBG_SQL:
+ if server.DEBUG & (server.DBG_SQL | server.DBG_RQL):
print 'sql cnx COMMIT', self._cnx
self._cnx.commit()
def rollback(self):
if self._cnx is not None:
- if server.DEBUG & server.DBG_SQL:
+ if server.DEBUG & (server.DBG_SQL | server.DBG_RQL):
print 'sql cnx ROLLBACK', self._cnx
self._cnx.rollback()
@@ -94,18 +93,21 @@
AbstractSource.__init__(self, repo, appschema, source_config,
*args, **kwargs)
- def backup(self, confirm, backupfile=None, timestamp=None, askconfirm=False):
- """method called to create a backup of source's data"""
- backupfile = self.backup_file(backupfile, timestamp)
- sql_source_backup(self, self.sqladapter, confirm, backupfile,
- askconfirm)
+ def backup(self, backupfile):
+ """method called to create a backup of the source's data"""
+ self.close_pool_connections()
+ try:
+ self.sqladapter.backup_to_file(backupfile)
+ finally:
+ self.open_pool_connections()
- def restore(self, confirm, backupfile=None, timestamp=None, drop=True,
- askconfirm=False):
+ def restore(self, backupfile, drop):
"""method called to restore a backup of source's data"""
- backupfile = self.backup_file(backupfile, timestamp)
- sql_source_restore(self, self.sqladapter, confirm, backupfile, drop,
- askconfirm)
+ self.close_pool_connections()
+ try:
+ self.sqladapter.restore_from_file(backupfile, drop)
+ finally:
+ self.open_pool_connections()
@property
def _sqlcnx(self):
--- a/server/sources/ldapuser.py Mon Aug 10 09:58:40 2009 -0700
+++ b/server/sources/ldapuser.py Tue Aug 11 17:19:05 2009 +0200
@@ -181,6 +181,7 @@
def reset_caches(self):
"""method called during test to reset potential source caches"""
+ self._cache = {}
self._query_cache = TimedCache(2*60)
def init(self):
--- a/server/sources/native.py Mon Aug 10 09:58:40 2009 -0700
+++ b/server/sources/native.py Tue Aug 11 17:19:05 2009 +0200
@@ -25,8 +25,7 @@
from cubicweb import UnknownEid, AuthenticationError, Binary, server
from cubicweb.server.utils import crypt_password
-from cubicweb.server.sqlutils import (SQL_PREFIX, SQLAdapterMixIn,
- sql_source_backup, sql_source_restore)
+from cubicweb.server.sqlutils import SQL_PREFIX, SQLAdapterMixIn
from cubicweb.server.rqlannotation import set_qdata
from cubicweb.server.sources import AbstractSource, dbg_st_search, dbg_results
from cubicweb.server.sources.rql2sql import SQLGenerator
@@ -207,17 +206,21 @@
pool.pool_reset()
self.repo._free_pool(pool)
- def backup(self, confirm, backupfile=None, timestamp=None,
- askconfirm=False):
- """method called to create a backup of source's data"""
- backupfile = self.backup_file(backupfile, timestamp)
- sql_source_backup(self, self, confirm, backupfile, askconfirm)
+ def backup(self, backupfile):
+ """method called to create a backup of the source's data"""
+ self.close_pool_connections()
+ try:
+ self.backup_to_file(backupfile)
+ finally:
+ self.open_pool_connections()
- def restore(self, confirm, backupfile=None, timestamp=None, drop=True,
- askconfirm=False):
+ def restore(self, backupfile, drop):
"""method called to restore a backup of source's data"""
- backupfile = self.backup_file(backupfile, timestamp)
- sql_source_restore(self, self, confirm, backupfile, drop, askconfirm)
+ self.close_pool_connections()
+ try:
+ self.restore_from_file(backupfile, drop)
+ finally:
+ self.open_pool_connections()
def init(self):
self.init_creating()
--- a/server/sources/pyrorql.py Mon Aug 10 09:58:40 2009 -0700
+++ b/server/sources/pyrorql.py Tue Aug 11 17:19:05 2009 +0200
@@ -127,6 +127,10 @@
register_persistent_options(myoptions)
self._query_cache = TimedCache(30)
+ def reset_caches(self):
+ """method called during test to reset potential source caches"""
+ self._query_cache = TimedCache(30)
+
def last_update_time(self):
pkey = u'sources.%s.latest-update-time' % self.uri
rql = 'Any V WHERE X is CWProperty, X value V, X pkey %(k)s'
--- a/server/sqlutils.py Mon Aug 10 09:58:40 2009 -0700
+++ b/server/sqlutils.py Tue Aug 11 17:19:05 2009 +0200
@@ -120,39 +120,6 @@
skip_relations=skip_relations))
return '\n'.join(output)
-
-def sql_source_backup(source, sqladapter, confirm, backupfile,
- askconfirm=False):
- if exists(backupfile):
- if not confirm('Backup file %s exists, overwrite it?' % backupfile):
- return
- elif askconfirm and not confirm('Backup %s database?'
- % source.repo.config.appid):
- print '-> no backup done.'
- return
- # should close opened connection before backuping
- source.close_pool_connections()
- try:
- sqladapter.backup_to_file(backupfile, confirm)
- finally:
- source.open_pool_connections()
-
-def sql_source_restore(source, sqladapter, confirm, backupfile, drop=True,
- askconfirm=False):
- if not exists(backupfile):
- raise Exception("backup file %s doesn't exist" % backupfile)
- app = source.repo.config.appid
- if askconfirm and not confirm('Restore %s %s database from %s ?'
- % (app, source.uri, backupfile)):
- return
- # should close opened connection before restoring
- source.close_pool_connections()
- try:
- sqladapter.restore_from_file(backupfile, confirm, drop=drop)
- finally:
- source.open_pool_connections()
-
-
try:
from mx.DateTime import DateTimeType, DateTimeDeltaType
except ImportError:
@@ -196,25 +163,12 @@
#self.dbapi_module.type_code_test(cnx.cursor())
return cnx
- def backup_to_file(self, backupfile, confirm):
+ def backup_to_file(self, backupfile):
cmd = self.dbhelper.backup_command(self.dbname, self.dbhost,
self.dbuser, backupfile,
keepownership=False)
- backupdir = os.path.dirname(backupfile)
- if not os.path.exists(backupdir):
- if confirm('%s does not exist. Create it?' % backupdir,
- abort=False, shell=False):
- os.makedirs(backupdir)
- else:
- print '-> failed to backup instance'
- return
if os.system(cmd):
- print '-> error trying to backup with command', cmd
- if not confirm('Continue anyway?', default='n'):
- raise SystemExit(1)
- else:
- print '-> backup file', backupfile
- restrict_perms_to_user(backupfile, self.info)
+ raise Exception('Failed command: %s' % cmd)
def restore_from_file(self, backupfile, confirm, drop=True):
for cmd in self.dbhelper.restore_commands(self.dbname, self.dbhost,
@@ -222,19 +176,8 @@
self.encoding,
keepownership=False,
drop=drop):
- while True:
- print cmd
- if os.system(cmd):
- print '-> error while restoring the base'
- answer = confirm('Continue anyway?',
- shell=False, abort=False, retry=True)
- if not answer:
- raise SystemExit(1)
- if answer == 1: # 1: continue, 2: retry
- break
- else:
- break
- print '-> database restored.'
+ if os.system(cmd):
+ raise Exception('Failed command: %s' % cmd)
def merge_args(self, args, query_args):
if args is not None:
--- a/vregistry.py Mon Aug 10 09:58:40 2009 -0700
+++ b/vregistry.py Tue Aug 11 17:19:05 2009 +0200
@@ -206,7 +206,7 @@
super(VRegistry, self).__init__()
self.config = config
- def reset(self, force_reload=None):
+ def reset(self, path=None, force_reload=None):
self.clear()
self._lastmodifs = {}
@@ -318,14 +318,7 @@
self._loadedmods = {}
return filemods
- def register_objects(self, path, force_reload=None, extrapath=None):
- if force_reload is None:
- force_reload = self.config.mode == 'dev'
- elif not force_reload:
- # force_reload == False usually mean modules have been reloaded
- # by another connection, so we want to update the registry
- # content even if there has been no module content modification
- self.reset()
+ def register_objects(self, path, force_reload, extrapath=None):
# need to clean sys.path this to avoid import confusion pb (i.e.
# having the same module loaded as 'cubicweb.web.views' subpackage and
# as views' or 'web.views' subpackage
--- a/web/test/unittest_form.py Mon Aug 10 09:58:40 2009 -0700
+++ b/web/test/unittest_form.py Tue Aug 11 17:19:05 2009 +0200
@@ -124,12 +124,14 @@
creation_date = DateTimeField(widget=DateTimePicker)
form = CustomChangeStateForm(self.req, redirect_path='perdu.com',
entity=self.entity)
- form.form_render(state=123, trcomment=u'')
+ form.form_render(state=123, trcomment=u'',
+ trcomment_format=u'text/plain')
def test_change_state_form(self):
form = ChangeStateForm(self.req, redirect_path='perdu.com',
entity=self.entity)
- form.form_render(state=123, trcomment=u'')
+ form.form_render(state=123, trcomment=u'',
+ trcomment_format=u'text/plain')
# fields tests ############################################################
@@ -161,7 +163,7 @@
def test_richtextfield_2(self):
self.req.use_fckeditor = lambda: True
- self._test_richtextfield('<input name="description_format:%(eid)s" style="display: block" type="hidden" value="text/rest" /><textarea cols="80" cubicweb:type="wysiwyg" id="description:%(eid)s" name="description:%(eid)s" onkeyup="autogrow(this)" rows="2" tabindex="1"></textarea>')
+ self._test_richtextfield('<input name="description_format:%(eid)s" type="hidden" value="text/rest" /><textarea cols="80" cubicweb:type="wysiwyg" id="description:%(eid)s" name="description:%(eid)s" onkeyup="autogrow(this)" rows="2" tabindex="1"></textarea>')
def test_filefield(self):