--- a/_gcdebug.py Fri Sep 11 14:28:06 2015 +0200
+++ b/_gcdebug.py Fri Sep 11 14:52:09 2015 +0200
@@ -15,6 +15,7 @@
#
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import print_function
import gc, types, weakref
@@ -68,7 +69,7 @@
except KeyError:
ocounters[key] = 1
if isinstance(obj, viewreferrersclasses):
- print ' ', obj, referrers(obj, showobjs, maxlevel)
+ print(' ', obj, referrers(obj, showobjs, maxlevel))
garbage = [repr(obj) for obj in gc.garbage]
return counters, ocounters, garbage
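
Every file touched in this changeset gets the same treatment: a `from __future__ import print_function` added near the top of the module (after the docstring and license comments, before any other statement), then each print statement rewritten as a call, because the future import makes the old statement form a SyntaxError for the whole module. A minimal standalone sketch of the effect, not part of the patch itself:

    from __future__ import print_function

    # with the future import active, the Python 2 statement forms in the
    # comments below no longer parse and must all become calls
    print('hello', 'world')   # was: print 'hello', 'world'
    print()                   # was: print   (bare statement, emits a newline)
    print('done.')            # was: print 'done.'
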
--- a/cwconfig.py Fri Sep 11 14:28:06 2015 +0200
+++ b/cwconfig.py Fri Sep 11 14:52:09 2015 +0200
@@ -164,6 +164,7 @@
Directory where pid files will be written
"""
+from __future__ import print_function
__docformat__ = "restructuredtext en"
_ = unicode
@@ -1188,8 +1189,8 @@
def set_sources_mode(self, sources):
if not 'all' in sources:
- print 'warning: ignoring specified sources, requires a repository '\
- 'configuration'
+ print('warning: ignoring specified sources, requires a repository '
+ 'configuration')
def i18ncompile(self, langs=None):
from cubicweb import i18n
--- a/cwctl.py Fri Sep 11 14:28:06 2015 +0200
+++ b/cwctl.py Fri Sep 11 14:52:09 2015 +0200
@@ -18,6 +18,7 @@
"""the cubicweb-ctl tool, based on logilab.common.clcommands to
provide a pluggable commands system.
"""
+from __future__ import print_function
__docformat__ = "restructuredtext en"
@@ -120,8 +121,8 @@
_allinstances.remove(line)
allinstances.append(line)
except ValueError:
- print ('ERROR: startorder file contains unexistant '
- 'instance %s' % line)
+                        print('ERROR: startorder file contains nonexistent '
+                              'instance %s' % line)
allinstances += _allinstances
else:
allinstances = _allinstances
@@ -146,7 +147,7 @@
status = 0
for appid in args:
if askconfirm:
- print '*'*72
+ print('*'*72)
if not ASK.confirm('%s instance %r ?' % (self.name, appid)):
continue
try:
@@ -184,13 +185,13 @@
forkcmd = None
for appid in args:
if askconfirm:
- print '*'*72
+ print('*'*72)
if not ASK.confirm('%s instance %r ?' % (self.name, appid)):
continue
if forkcmd:
status = system('%s %s' % (forkcmd, appid))
if status:
- print '%s exited with status %s' % (forkcmd, status)
+ print('%s exited with status %s' % (forkcmd, status))
else:
self.run_arg(appid)
@@ -224,19 +225,19 @@
from cubicweb.migration import ConfigurationProblem
if mode == 'all':
- print 'CubicWeb %s (%s mode)' % (cwcfg.cubicweb_version(), cwcfg.mode)
- print
+ print('CubicWeb %s (%s mode)' % (cwcfg.cubicweb_version(), cwcfg.mode))
+ print()
if mode in ('all', 'config', 'configurations'):
- print 'Available configurations:'
+ print('Available configurations:')
for config in CONFIGURATIONS:
- print '*', config.name
+ print('*', config.name)
for line in config.__doc__.splitlines():
line = line.strip()
if not line:
continue
- print ' ', line
- print
+ print(' ', line)
+ print()
if mode in ('all', 'cubes'):
cfgpb = ConfigurationProblem(cwcfg)
@@ -244,11 +245,11 @@
cubesdir = pathsep.join(cwcfg.cubes_search_path())
namesize = max(len(x) for x in cwcfg.available_cubes())
except ConfigurationError as ex:
- print 'No cubes available:', ex
+ print('No cubes available:', ex)
except ValueError:
- print 'No cubes available in %s' % cubesdir
+ print('No cubes available in %s' % cubesdir)
else:
- print 'Available cubes (%s):' % cubesdir
+ print('Available cubes (%s):' % cubesdir)
for cube in cwcfg.available_cubes():
try:
tinfo = cwcfg.cube_pkginfo(cube)
@@ -257,59 +258,59 @@
except (ConfigurationError, AttributeError) as ex:
tinfo = None
tversion = '[missing cube information: %s]' % ex
- print '* %s %s' % (cube.ljust(namesize), tversion)
+ print('* %s %s' % (cube.ljust(namesize), tversion))
if self.config.verbose:
if tinfo:
descr = getattr(tinfo, 'description', '')
if not descr:
descr = tinfo.__doc__
if descr:
- print ' '+ ' \n'.join(descr.splitlines())
+ print(' '+ ' \n'.join(descr.splitlines()))
modes = detect_available_modes(cwcfg.cube_dir(cube))
- print ' available modes: %s' % ', '.join(modes)
- print
+ print(' available modes: %s' % ', '.join(modes))
+ print()
if mode in ('all', 'instances'):
try:
regdir = cwcfg.instances_dir()
except ConfigurationError as ex:
- print 'No instance available:', ex
- print
+ print('No instance available:', ex)
+ print()
return
instances = list_instances(regdir)
if instances:
- print 'Available instances (%s):' % regdir
+ print('Available instances (%s):' % regdir)
for appid in instances:
modes = cwcfg.possible_configurations(appid)
if not modes:
- print '* %s (BROKEN instance, no configuration found)' % appid
+ print('* %s (BROKEN instance, no configuration found)' % appid)
continue
- print '* %s (%s)' % (appid, ', '.join(modes))
+ print('* %s (%s)' % (appid, ', '.join(modes)))
try:
config = cwcfg.config_for(appid, modes[0])
except Exception as exc:
- print ' (BROKEN instance, %s)' % exc
+ print(' (BROKEN instance, %s)' % exc)
continue
else:
- print 'No instance available in %s' % regdir
- print
+ print('No instance available in %s' % regdir)
+ print()
if mode == 'all':
# configuration management problem solving
cfgpb.solve()
if cfgpb.warnings:
- print 'Warnings:\n', '\n'.join('* '+txt for txt in cfgpb.warnings)
+ print('Warnings:\n', '\n'.join('* '+txt for txt in cfgpb.warnings))
if cfgpb.errors:
- print 'Errors:'
+ print('Errors:')
for op, cube, version, src in cfgpb.errors:
if op == 'add':
- print '* cube', cube,
+ print('* cube', cube, end=' ')
if version:
- print ' version', version,
- print 'is not installed, but required by %s' % src
+ print(' version', version, end=' ')
+ print('is not installed, but required by %s' % src)
else:
- print '* cube %s version %s is installed, but version %s is required by %s' % (
- cube, cfgpb.cubes[cube], version, src)
+ print('* cube %s version %s is installed, but version %s is required by %s' % (
+ cube, cfgpb.cubes[cube], version, src))
def check_options_consistency(config):
if config.automatic and config.config_level > 0:
@@ -380,20 +381,20 @@
templdirs = [cwcfg.cube_dir(cube)
for cube in cubes]
except ConfigurationError as ex:
- print ex
- print '\navailable cubes:',
- print ', '.join(cwcfg.available_cubes())
+ print(ex)
+ print('\navailable cubes:', end=' ')
+ print(', '.join(cwcfg.available_cubes()))
return
# create the registry directory for this instance
- print '\n'+underline_title('Creating the instance %s' % appid)
+ print('\n'+underline_title('Creating the instance %s' % appid))
create_dir(config.apphome)
# cubicweb-ctl configuration
if not self.config.automatic:
- print '\n'+underline_title('Configuring the instance (%s.conf)'
- % configname)
+ print('\n'+underline_title('Configuring the instance (%s.conf)'
+ % configname))
config.input_config('main', self.config.config_level)
# configuration'specific stuff
- print
+ print()
helper.bootstrap(cubes, self.config.automatic, self.config.config_level)
# input for cubes specific options
if not self.config.automatic:
@@ -402,23 +403,23 @@
and odict.get('level') <= self.config.config_level)
for section in sections:
if section not in ('main', 'email', 'web'):
- print '\n' + underline_title('%s options' % section)
+ print('\n' + underline_title('%s options' % section))
config.input_config(section, self.config.config_level)
# write down configuration
config.save()
self._handle_win32(config, appid)
- print '-> generated config %s' % config.main_config_file()
+ print('-> generated config %s' % config.main_config_file())
# handle i18n files structure
# in the first cube given
from cubicweb import i18n
langs = [lang for lang, _ in i18n.available_catalogs(join(templdirs[0], 'i18n'))]
errors = config.i18ncompile(langs)
if errors:
- print '\n'.join(errors)
+ print('\n'.join(errors))
if self.config.automatic \
or not ASK.confirm('error while compiling message catalogs, '
'continue anyway ?'):
- print 'creation not completed'
+ print('creation not completed')
return
# create the additional data directory for this instance
if config.appdatahome != config.apphome: # true in dev mode
@@ -427,9 +428,9 @@
if config['uid']:
from logilab.common.shellutils import chown
# this directory should be owned by the uid of the server process
- print 'set %s as owner of the data directory' % config['uid']
+ print('set %s as owner of the data directory' % config['uid'])
chown(config.appdatahome, config['uid'])
- print '\n-> creation done for %s\n' % repr(config.apphome)[1:-1]
+ print('\n-> creation done for %s\n' % repr(config.apphome)[1:-1])
if not self.config.no_db_create:
helper.postcreate(self.config.automatic, self.config.config_level)
@@ -487,7 +488,7 @@
if ex.errno != errno.ENOENT:
raise
confignames = ', '.join([config.name for config in configs])
- print '-> instance %s (%s) deleted.' % (appid, confignames)
+ print('-> instance %s (%s) deleted.' % (appid, confignames))
# instance commands ########################################################
@@ -551,7 +552,7 @@
the --force option."
raise ExecutionError(msg % (appid, pidf))
if helper.start_server(config) == 1:
- print 'instance %s started' % appid
+ print('instance %s started' % appid)
def init_cmdline_log_threshold(config, loglevel):
@@ -606,7 +607,7 @@
except OSError:
# already removed by twistd
pass
- print 'instance %s stopped' % appid
+ print('instance %s stopped' % appid)
class RestartInstanceCommand(StartInstanceCommand):
@@ -630,7 +631,7 @@
# get instances in startorder
for appid in args:
if askconfirm:
- print '*'*72
+ print('*'*72)
if not ASK.confirm('%s instance %r ?' % (self.name, appid)):
continue
StopInstanceCommand(self.logger).stop_instance(appid)
@@ -677,14 +678,14 @@
status = 0
for mode in cwcfg.possible_configurations(appid):
config = cwcfg.config_for(appid, mode)
- print '[%s-%s]' % (appid, mode),
+ print('[%s-%s]' % (appid, mode), end=' ')
try:
pidf = config['pid-file']
except KeyError:
- print 'buggy instance, pid file not specified'
+ print('buggy instance, pid file not specified')
continue
if not exists(pidf):
- print "doesn't seem to be running"
+ print("doesn't seem to be running")
status = 1
continue
pid = int(open(pidf).read().strip())
@@ -692,10 +693,10 @@
try:
getpgid(pid)
except OSError:
- print "should be running with pid %s but the process can not be found" % pid
+ print("should be running with pid %s but the process can not be found" % pid)
status = 1
continue
- print "running with pid %s" % (pid)
+ print("running with pid %s" % (pid))
return status
class UpgradeInstanceCommand(InstanceCommandFork):
@@ -756,7 +757,7 @@
)
def upgrade_instance(self, appid):
- print '\n' + underline_title('Upgrading the instance %s' % appid)
+ print('\n' + underline_title('Upgrading the instance %s' % appid))
from logilab.common.changelog import Version
config = cwcfg.config_for(appid)
instance_running = exists(config['pid-file'])
@@ -797,30 +798,30 @@
# run cubicweb/componants migration scripts
if self.config.fs_only or toupgrade:
for cube, fromversion, toversion in toupgrade:
- print '-> migration needed from %s to %s for %s' % (fromversion, toversion, cube)
+ print('-> migration needed from %s to %s for %s' % (fromversion, toversion, cube))
with mih.cnx:
with mih.cnx.security_enabled(False, False):
mih.migrate(vcconf, reversed(toupgrade), self.config)
else:
- print '-> no data migration needed for instance %s.' % appid
+ print('-> no data migration needed for instance %s.' % appid)
# rewrite main configuration file
mih.rewrite_configuration()
mih.shutdown()
# handle i18n upgrade
if not self.i18nupgrade(config):
return
- print
+ print()
if helper:
helper.postupgrade(repo)
- print '-> instance migrated.'
+ print('-> instance migrated.')
if instance_running and not (CWDEV or self.config.nostartstop):
# restart instance through fork to get a proper environment, avoid
# uicfg pb (and probably gettext catalogs, to check...)
forkcmd = '%s start %s' % (sys.argv[0], appid)
status = system(forkcmd)
if status:
- print '%s exited with status %s' % (forkcmd, status)
- print
+ print('%s exited with status %s' % (forkcmd, status))
+ print()
def i18nupgrade(self, config):
# handle i18n upgrade:
@@ -832,10 +833,10 @@
langs = [lang for lang, _ in i18n.available_catalogs(join(templdir, 'i18n'))]
errors = config.i18ncompile(langs)
if errors:
- print '\n'.join(errors)
+ print('\n'.join(errors))
if not ASK.confirm('Error while compiling message catalogs, '
'continue anyway?'):
- print '-> migration not completed.'
+ print('-> migration not completed.')
return False
return True
@@ -858,7 +859,7 @@
config.set_sources_mode(('migration',))
vcconf = config.repository().get_versions()
for key in sorted(vcconf):
- print key+': %s.%s.%s' % vcconf[key]
+ print(key+': %s.%s.%s' % vcconf[key])
class ShellCommand(Command):
"""Run an interactive migration shell on an instance. This is a python shell
@@ -939,9 +940,9 @@
repo = get_repository(appuri)
cnx = connect(repo, login=login, password=pwd, mulcnx=False)
except AuthenticationError as ex:
- print ex
+ print(ex)
except (KeyboardInterrupt, EOFError):
- print
+ print()
sys.exit(0)
else:
break
@@ -1002,7 +1003,7 @@
config.init_cubes(repo.get_cubes())
errors = config.i18ncompile()
if errors:
- print '\n'.join(errors)
+ print('\n'.join(errors))
class ListInstancesCommand(Command):
@@ -1014,7 +1015,7 @@
"""run the command with its specific arguments"""
regdir = cwcfg.instances_dir()
for appid in sorted(listdir(regdir)):
- print appid
+ print(appid)
class ListCubesCommand(Command):
@@ -1025,7 +1026,7 @@
def run(self, args):
"""run the command with its specific arguments"""
for cube in cwcfg.available_cubes():
- print cube
+ print(cube)
class ConfigureInstanceCommand(InstanceCommand):
"""Configure instance.
@@ -1144,10 +1145,10 @@
try:
CWCTL.run(args)
except ConfigurationError as err:
- print 'ERROR: ', err
+ print('ERROR: ', err)
sys.exit(1)
except ExecutionError as err:
- print err
+ print(err)
sys.exit(2)
if __name__ == '__main__':
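
Several of the hunks above (the cube listing and the instance status line, for instance) convert Python 2's trailing-comma form, which suppressed the newline and left a pending "soft space", into print() with end=' '. The output is essentially unchanged, except that end=' ' writes its space immediately instead of lazily. A standalone sketch of the equivalence, with an illustrative function name that is not part of the patch:

    from __future__ import print_function

    def report_missing_cube(cube, version, src):
        # Python 2: print '* cube', cube,
        print('* cube', cube, end=' ')
        if version:
            # Python 2: print ' version', version,
            print(' version', version, end=' ')
        # Python 2: print 'is not installed, but required by %s' % src
        print('is not installed, but required by %s' % src)

    report_missing_cube('forge', '1.2.0', 'myinstance')
    # -> * cube forge  version 1.2.0 is not installed, but required by myinstance
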
--- a/dataimport/csv.py Fri Sep 11 14:28:06 2015 +0200
+++ b/dataimport/csv.py Fri Sep 11 14:52:09 2015 +0200
@@ -16,8 +16,7 @@
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
"""Functions to help importing CSV data"""
-
-from __future__ import absolute_import
+from __future__ import absolute_import, print_function
import csv as csvmod
import warnings
@@ -64,7 +63,7 @@
yield urow
if withpb:
pb.update()
- print ' %s rows imported' % rowcount
+ print(' %s rows imported' % rowcount)
def ucsvreader(stream, encoding='utf-8', delimiter=',', quotechar='"',
--- a/dataimport/deprecated.py Fri Sep 11 14:28:06 2015 +0200
+++ b/dataimport/deprecated.py Fri Sep 11 14:52:09 2015 +0200
@@ -58,6 +58,7 @@
.. BUG file with one column are not parsable
.. TODO rollback() invocation is not possible yet
"""
+from __future__ import print_function
import sys
import traceback
@@ -103,7 +104,7 @@
@deprecated('[3.21] deprecated')
def tell(msg):
- print msg
+ print(msg)
@deprecated('[3.21] deprecated')
--- a/dataimport/pgstore.py Fri Sep 11 14:28:06 2015 +0200
+++ b/dataimport/pgstore.py Fri Sep 11 14:52:09 2015 +0200
@@ -16,6 +16,7 @@
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
"""Postgres specific store"""
+from __future__ import print_function
import threading
import warnings
@@ -52,7 +53,7 @@
for t in threads:
t.join()
except Exception:
- print 'Error in import statements'
+ print('Error in import statements')
def _execmany_thread_not_copy_from(cu, statement, data, table=None,
columns=None, encoding='utf-8'):
@@ -100,7 +101,7 @@
columns = list(data[0])
execmany_func(cu, statement, data, table, columns, encoding)
except Exception:
- print 'unable to copy data into table %s' % table
+ print('unable to copy data into table %s' % table)
# Error in import statement, save data in dump_output_dir
if dump_output_dir is not None:
pdata = {'data': data, 'statement': statement,
@@ -111,7 +112,7 @@
'%s.pickle' % filename), 'w') as fobj:
fobj.write(cPickle.dumps(pdata))
except IOError:
- print 'ERROR while pickling in', dump_output_dir, filename+'.pickle'
+ print('ERROR while pickling in', dump_output_dir, filename+'.pickle')
pass
cnx.rollback()
raise
@@ -335,7 +336,7 @@
self._sql.eid_insertdicts = {}
def flush(self):
- print 'starting flush'
+ print('starting flush')
_entities_sql = self._sql.entities
_relations_sql = self._sql.relations
_inlined_relations_sql = self._sql.inlined_relations
--- a/devtools/__init__.py Fri Sep 11 14:28:06 2015 +0200
+++ b/devtools/__init__.py Fri Sep 11 14:52:09 2015 +0200
@@ -16,6 +16,7 @@
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
"""Test tools for cubicweb"""
+from __future__ import print_function
__docformat__ = "restructuredtext en"
@@ -109,7 +110,7 @@
try:
repo.close(sessionid)
except BadConnectionId: #this is strange ? thread issue ?
- print 'XXX unknown session', sessionid
+ print('XXX unknown session', sessionid)
for cnxset in repo.cnxsets:
cnxset.close(True)
repo.system_source.shutdown()
@@ -193,7 +194,7 @@
def sources_file(self):
"""define in subclasses self.sourcefile if necessary"""
if self.sourcefile:
- print 'Reading sources from', self.sourcefile
+ print('Reading sources from', self.sourcefile)
sourcefile = self.sourcefile
if not isabs(sourcefile):
sourcefile = join(self.apphome, sourcefile)
@@ -492,7 +493,7 @@
if test_db_id is DEFAULT_EMPTY_DB_ID:
self.init_test_database()
else:
- print 'Building %s for database %s' % (test_db_id, self.dbname)
+ print('Building %s for database %s' % (test_db_id, self.dbname))
self.build_db_cache(DEFAULT_EMPTY_DB_ID)
self.restore_database(DEFAULT_EMPTY_DB_ID)
repo = self.get_repo(startup=True)
--- a/devtools/devctl.py Fri Sep 11 14:28:06 2015 +0200
+++ b/devtools/devctl.py Fri Sep 11 14:52:09 2015 +0200
@@ -18,6 +18,7 @@
"""additional cubicweb-ctl commands and command handlers for cubicweb and
cubicweb's cubes development
"""
+from __future__ import print_function
__docformat__ = "restructuredtext en"
@@ -314,8 +315,8 @@
from cubicweb.i18n import extract_from_tal, execute2
tempdir = tempfile.mkdtemp(prefix='cw-')
cwi18ndir = WebConfiguration.i18n_lib_dir()
- print '-> extract messages:',
- print 'schema',
+ print('-> extract messages:', end=' ')
+ print('schema', end=' ')
schemapot = osp.join(tempdir, 'schema.pot')
potfiles = [schemapot]
potfiles.append(schemapot)
@@ -324,11 +325,11 @@
schemapotstream = file(schemapot, 'w')
generate_schema_pot(schemapotstream.write, cubedir=None)
schemapotstream.close()
- print 'TAL',
+ print('TAL', end=' ')
tali18nfile = osp.join(tempdir, 'tali18n.py')
extract_from_tal(find(osp.join(BASEDIR, 'web'), ('.py', '.pt')),
tali18nfile)
- print '-> generate .pot files.'
+ print('-> generate .pot files.')
pyfiles = get_module_files(BASEDIR)
pyfiles += globfind(osp.join(BASEDIR, 'misc', 'migration'), '*.py')
schemafiles = globfind(osp.join(BASEDIR, 'schemas'), '*.py')
@@ -349,12 +350,12 @@
if osp.exists(potfile):
potfiles.append(potfile)
else:
- print '-> WARNING: %s file was not generated' % potfile
- print '-> merging %i .pot files' % len(potfiles)
+ print('-> WARNING: %s file was not generated' % potfile)
+ print('-> merging %i .pot files' % len(potfiles))
cubicwebpot = osp.join(tempdir, 'cubicweb.pot')
cmd = ['msgcat', '-o', cubicwebpot] + potfiles
execute2(cmd)
- print '-> merging main pot file with existing translations.'
+ print('-> merging main pot file with existing translations.')
chdir(cwi18ndir)
toedit = []
for lang in CubicWebNoAppConfiguration.cw_languages():
@@ -368,10 +369,10 @@
# cleanup
rm(tempdir)
# instructions pour la suite
- print '-> regenerated CubicWeb\'s .po catalogs.'
- print '\nYou can now edit the following files:'
- print '* ' + '\n* '.join(toedit)
- print 'when you are done, run "cubicweb-ctl i18ncube yourcube".'
+ print('-> regenerated CubicWeb\'s .po catalogs.')
+ print('\nYou can now edit the following files:')
+ print('* ' + '\n* '.join(toedit))
+ print('when you are done, run "cubicweb-ctl i18ncube yourcube".')
class UpdateCubeCatalogCommand(Command):
@@ -398,25 +399,25 @@
from subprocess import CalledProcessError
for cubedir in cubes:
if not osp.isdir(cubedir):
- print '-> ignoring %s that is not a directory.' % cubedir
+ print('-> ignoring %s that is not a directory.' % cubedir)
continue
try:
toedit = update_cube_catalogs(cubedir)
except CalledProcessError as exc:
- print '\n*** error while updating catalogs for cube', cubedir
- print 'cmd:\n%s' % exc.cmd
- print 'stdout:\n%s\nstderr:\n%s' % exc.data
+ print('\n*** error while updating catalogs for cube', cubedir)
+ print('cmd:\n%s' % exc.cmd)
+ print('stdout:\n%s\nstderr:\n%s' % exc.data)
except Exception:
import traceback
traceback.print_exc()
- print '*** error while updating catalogs for cube', cubedir
+ print('*** error while updating catalogs for cube', cubedir)
return False
else:
# instructions pour la suite
if toedit:
- print '-> regenerated .po catalogs for cube %s.' % cubedir
- print '\nYou can now edit the following files:'
- print '* ' + '\n* '.join(toedit)
+ print('-> regenerated .po catalogs for cube %s.' % cubedir)
+ print('\nYou can now edit the following files:')
+ print('* ' + '\n* '.join(toedit))
print ('When you are done, run "cubicweb-ctl i18ninstance '
'<yourinstance>" to see changes in your instances.')
return True
@@ -429,7 +430,7 @@
from cubicweb.i18n import extract_from_tal, execute2
cube = osp.basename(osp.normpath(cubedir))
tempdir = tempfile.mkdtemp()
- print underline_title('Updating i18n catalogs for cube %s' % cube)
+ print(underline_title('Updating i18n catalogs for cube %s' % cube))
chdir(cubedir)
if osp.exists(osp.join('i18n', 'entities.pot')):
warn('entities.pot is deprecated, rename file to static-messages.pot (%s)'
@@ -439,8 +440,8 @@
potfiles = [osp.join('i18n', 'static-messages.pot')]
else:
potfiles = []
- print '-> extracting messages:',
- print 'schema',
+ print('-> extracting messages:', end=' ')
+ print('schema', end=' ')
schemapot = osp.join(tempdir, 'schema.pot')
potfiles.append(schemapot)
# explicit close necessary else the file may not be yet flushed when
@@ -448,11 +449,11 @@
schemapotstream = file(schemapot, 'w')
generate_schema_pot(schemapotstream.write, cubedir)
schemapotstream.close()
- print 'TAL',
+ print('TAL', end=' ')
tali18nfile = osp.join(tempdir, 'tali18n.py')
ptfiles = find('.', ('.py', '.pt'), blacklist=STD_BLACKLIST+('test',))
extract_from_tal(ptfiles, tali18nfile)
- print 'Javascript'
+ print('Javascript')
jsfiles = [jsfile for jsfile in find('.', '.js')
if osp.basename(jsfile).startswith('cub')]
if jsfiles:
@@ -463,7 +464,7 @@
# no pot file created if there are no string to translate
if osp.exists(tmppotfile):
potfiles.append(tmppotfile)
- print '-> creating cube-specific catalog'
+ print('-> creating cube-specific catalog')
tmppotfile = osp.join(tempdir, 'generated.pot')
cubefiles = find('.', '.py', blacklist=STD_BLACKLIST+('test',))
cubefiles.append(tali18nfile)
@@ -473,20 +474,20 @@
if osp.exists(tmppotfile): # doesn't exists of no translation string found
potfiles.append(tmppotfile)
potfile = osp.join(tempdir, 'cube.pot')
- print '-> merging %i .pot files' % len(potfiles)
+ print('-> merging %i .pot files' % len(potfiles))
cmd = ['msgcat', '-o', potfile]
cmd.extend(potfiles)
execute2(cmd)
if not osp.exists(potfile):
- print 'no message catalog for cube', cube, 'nothing to translate'
+ print('no message catalog for cube', cube, 'nothing to translate')
# cleanup
rm(tempdir)
return ()
- print '-> merging main pot file with existing translations:',
+ print('-> merging main pot file with existing translations:', end=' ')
chdir('i18n')
toedit = []
for lang in CubicWebNoAppConfiguration.cw_languages():
- print lang,
+ print(lang, end=' ')
cubepo = '%s.po' % lang
if not osp.exists(cubepo):
shutil.copy(potfile, cubepo)
@@ -496,7 +497,7 @@
ensure_fs_mode(cubepo)
shutil.move('%snew' % cubepo, cubepo)
toedit.append(osp.abspath(cubepo))
- print
+ print()
# cleanup
rm(tempdir)
return toedit
@@ -620,7 +621,7 @@
" Please specify it using the --directory option")
cubesdir = cubespath[0]
if not osp.isdir(cubesdir):
- print "-> creating cubes directory", cubesdir
+ print("-> creating cubes directory", cubesdir)
try:
mkdir(cubesdir)
except OSError as err:
@@ -738,10 +739,10 @@
stat.sort()
stat.reverse()
total_time = sum(clocktime for clocktime, cputime, occ, rql in stat) * 0.01
- print 'Percentage;Cumulative Time (clock);Cumulative Time (CPU);Occurences;Query'
+        print('Percentage;Cumulative Time (clock);Cumulative Time (CPU);Occurrences;Query')
for clocktime, cputime, occ, rql in stat:
- print '%.2f;%.2f;%.2f;%s;%s' % (clocktime/total_time, clocktime,
- cputime, occ, rql)
+ print('%.2f;%.2f;%.2f;%s;%s' % (clocktime/total_time, clocktime,
+ cputime, occ, rql))
class GenerateSchema(Command):
--- a/devtools/fill.py Fri Sep 11 14:28:06 2015 +0200
+++ b/devtools/fill.py Fri Sep 11 14:52:09 2015 +0200
@@ -17,6 +17,7 @@
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
"""This modules defines func / methods for creating test repositories"""
+from __future__ import print_function
__docformat__ = "restructuredtext en"
@@ -363,7 +364,7 @@
rql += ', %s is %s' % (selectvar, objtype)
rset = cnx.execute(rql)
except Exception:
- print "could restrict eid_list with given constraints (%r)" % constraints
+        print("could not restrict eid_list with given constraints (%r)" % constraints)
return []
return set(eid for eid, in rset.rows)
--- a/devtools/httptest.py Fri Sep 11 14:28:06 2015 +0200
+++ b/devtools/httptest.py Fri Sep 11 14:52:09 2015 +0200
@@ -18,6 +18,8 @@
"""this module contains base classes and utilities for integration with running
http server
"""
+from __future__ import print_function
+
__docformat__ = "restructuredtext en"
import random
@@ -178,5 +180,5 @@
self.stop_server()
except error.ReactorNotRunning as err:
# Server could be launched manually
- print err
+ print(err)
super(CubicWebServerTC, self).tearDown()
--- a/devtools/instrument.py Fri Sep 11 14:28:06 2015 +0200
+++ b/devtools/instrument.py Fri Sep 11 14:52:09 2015 +0200
@@ -14,6 +14,7 @@
# You should have received a copy of the GNU Lesser General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
"""Instrumentation utilities"""
+from __future__ import print_function
import os
@@ -45,10 +46,10 @@
return _COLORS[key]
def warn(msg, *args):
- print 'WARNING: %s' % (msg % args)
+ print('WARNING: %s' % (msg % args))
def info(msg):
- print 'INFO: ' + msg
+ print('INFO: ' + msg)
class PropagationAnalyzer(object):
--- a/devtools/repotest.py Fri Sep 11 14:28:06 2015 +0200
+++ b/devtools/repotest.py Fri Sep 11 14:52:09 2015 +0200
@@ -19,6 +19,7 @@
This module contains functions to initialize a new repository.
"""
+from __future__ import print_function
__docformat__ = "restructuredtext en"
@@ -66,7 +67,7 @@
self.assertEqual(len(step[-1]), len(expected[-1]),
'got %s child steps, expected %s' % (len(step[-1]), len(expected[-1])))
except AssertionError:
- print 'error on step ',
+ print('error on step ', end=' ')
pprint(step[:-1])
raise
children = step[-1]
--- a/devtools/stresstester.py Fri Sep 11 14:28:06 2015 +0200
+++ b/devtools/stresstester.py Fri Sep 11 14:52:09 2015 +0200
@@ -41,6 +41,7 @@
Copyright (c) 2003-2011 LOGILAB S.A. (Paris, FRANCE), license is LGPL v2.
http://www.logilab.fr/ -- mailto:contact@logilab.fr
"""
+from __future__ import print_function
import os
import sys
@@ -84,7 +85,7 @@
def usage(status=0):
"""print usage string and exit"""
- print __doc__ % basename(sys.argv[0])
+ print(__doc__ % basename(sys.argv[0]))
sys.exit(status)
@@ -133,7 +134,7 @@
'nb-times=', 'nb-threads=',
'profile', 'report-output=',])
except Exception as ex:
- print ex
+ print(ex)
usage(1)
repeat = 100
threads = 1
@@ -166,7 +167,7 @@
from cubicweb.cwconfig import instance_configuration
config = instance_configuration(args[0])
# get local access to the repository
- print "Creating repo", prof_file
+ print("Creating repo", prof_file)
repo = Repository(config, prof_file)
cnxid = repo.connect(user, password=password)
# connection to the CubicWeb repository
--- a/devtools/testlib.py Fri Sep 11 14:28:06 2015 +0200
+++ b/devtools/testlib.py Fri Sep 11 14:52:09 2015 +0200
@@ -16,6 +16,8 @@
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
"""this module contains base classes and utilities for cubicweb tests"""
+from __future__ import print_function
+
__docformat__ = "restructuredtext en"
import sys
@@ -1164,7 +1166,7 @@
cnx.execute(rql, args)
except ValidationError as ex:
# failed to satisfy some constraint
- print 'error in automatic db population', ex
+ print('error in automatic db population', ex)
cnx.commit_state = None # reset uncommitable flag
self.post_populate(cnx)
--- a/doc/tools/mode_plan.py Fri Sep 11 14:28:06 2015 +0200
+++ b/doc/tools/mode_plan.py Fri Sep 11 14:52:09 2015 +0200
@@ -23,17 +23,19 @@
rename A010-joe.en.txt to A030-joe.en.txt
accept [y/N]?
"""
+from __future__ import print_function
+
def ren(a,b):
names = glob.glob('%s*'%a)
for name in names :
- print 'rename %s to %s' % (name, name.replace(a,b))
+ print('rename %s to %s' % (name, name.replace(a,b)))
if raw_input('accept [y/N]?').lower() =='y':
for name in names:
os.system('hg mv %s %s' % (name, name.replace(a,b)))
-def ls(): print '\n'.join(sorted(os.listdir('.')))
+def ls(): print('\n'.join(sorted(os.listdir('.'))))
def move():
filenames = []
@@ -47,4 +49,4 @@
for num, name in filenames:
if num >= start:
- print 'hg mv %s %2i%s' %(name,num+1,name[2:])
+ print('hg mv %s %2i%s' %(name,num+1,name[2:]))
--- a/entities/wfobjs.py Fri Sep 11 14:28:06 2015 +0200
+++ b/entities/wfobjs.py Fri Sep 11 14:52:09 2015 +0200
@@ -21,6 +21,7 @@
* workflow history (TrInfo)
* adapter for workflowable entities (IWorkflowableAdapter)
"""
+from __future__ import print_function
__docformat__ = "restructuredtext en"
@@ -224,19 +225,19 @@
matches = user.matching_groups(groups)
if matches:
if DBG:
- print 'may_be_fired: %r may fire: user matches %s' % (self.name, groups)
+ print('may_be_fired: %r may fire: user matches %s' % (self.name, groups))
return matches
if 'owners' in groups and user.owns(eid):
if DBG:
- print 'may_be_fired: %r may fire: user is owner' % self.name
+ print('may_be_fired: %r may fire: user is owner' % self.name)
return True
# check one of the rql expression conditions matches if any
if self.condition:
if DBG:
- print ('my_be_fired: %r: %s' %
- (self.name, [(rqlexpr.expression,
+                print('may_be_fired: %r: %s' %
+ (self.name, [(rqlexpr.expression,
rqlexpr.check_expression(self._cw, eid))
- for rqlexpr in self.condition]))
+ for rqlexpr in self.condition]))
for rqlexpr in self.condition:
if rqlexpr.check_expression(self._cw, eid):
return True
--- a/etwist/service.py Fri Sep 11 14:28:06 2015 +0200
+++ b/etwist/service.py Fri Sep 11 14:52:09 2015 +0200
@@ -15,6 +15,8 @@
#
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import print_function
+
import os
import sys
@@ -22,7 +24,7 @@
import win32serviceutil
import win32service
except ImportError:
- print 'Win32 extensions for Python are likely not installed.'
+ print('Win32 extensions for Python are likely not installed.')
sys.exit(3)
from os.path import join
--- a/i18n.py Fri Sep 11 14:28:06 2015 +0200
+++ b/i18n.py Fri Sep 11 14:52:09 2015 +0200
@@ -16,6 +16,7 @@
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
"""Some i18n/gettext utilities."""
+from __future__ import print_function
__docformat__ = "restructuredtext en"
@@ -80,7 +81,7 @@
"""
from subprocess import CalledProcessError
from logilab.common.fileutils import ensure_fs_mode
- print '-> compiling message catalogs to %s' % destdir
+ print('-> compiling message catalogs to %s' % destdir)
errors = []
for lang in langs:
langdir = join(destdir, lang, 'LC_MESSAGES')
--- a/migration.py Fri Sep 11 14:28:06 2015 +0200
+++ b/migration.py Fri Sep 11 14:52:09 2015 +0200
@@ -16,6 +16,7 @@
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
"""utilities for instances migration"""
+from __future__ import print_function
__docformat__ = "restructuredtext en"
@@ -49,7 +50,7 @@
assert fromversion <= toversion, (fromversion, toversion)
if not exists(directory):
if not quiet:
- print directory, "doesn't exists, no migration path"
+            print(directory, "doesn't exist, no migration path")
return []
if fromversion == toversion:
return []
@@ -93,9 +94,9 @@
stream = open(scriptpath)
scriptcontent = stream.read()
stream.close()
- print
- print scriptcontent
- print
+ print()
+ print(scriptcontent)
+ print()
else:
return True
@@ -240,7 +241,7 @@
# avoid '_' to be added to builtins by sys.display_hook
def do_not_add___to_builtins(obj):
if obj is not None:
- print repr(obj)
+ print(repr(obj))
sys.displayhook = do_not_add___to_builtins
local_ctx = self._create_context()
try:
@@ -519,9 +520,9 @@
elif op == None:
continue
else:
- print ('unable to handle %s in %s, set to `%s %s` '
- 'but currently up to `%s %s`' %
- (cube, source, oper, version, op, ver))
+ print('unable to handle %s in %s, set to `%s %s` '
+ 'but currently up to `%s %s`' %
+ (cube, source, oper, version, op, ver))
# "solve" constraint satisfaction problem
if cube not in self.cubes:
self.errors.append( ('add', cube, version, source) )
@@ -533,4 +534,4 @@
elif oper is None:
pass # no constraint on version
else:
- print 'unknown operator', oper
+ print('unknown operator', oper)
--- a/misc/migration/3.14.0_Any.py Fri Sep 11 14:28:06 2015 +0200
+++ b/misc/migration/3.14.0_Any.py Fri Sep 11 14:52:09 2015 +0200
@@ -1,3 +1,5 @@
+from __future__ import print_function
+
config['rql-cache-size'] = config['rql-cache-size'] * 10
add_entity_type('CWDataImport')
@@ -10,4 +12,4 @@
mainvars = guess_rrqlexpr_mainvars(expression)
yamscstr = CONSTRAINTS[rqlcstr.type](expression, mainvars)
rqlcstr.cw_set(value=yamscstr.serialize())
- print 'updated', rqlcstr.type, rqlcstr.value.strip()
+ print('updated', rqlcstr.type, rqlcstr.value.strip())
--- a/misc/migration/3.15.4_Any.py Fri Sep 11 14:28:06 2015 +0200
+++ b/misc/migration/3.15.4_Any.py Fri Sep 11 14:52:09 2015 +0200
@@ -1,3 +1,5 @@
+from __future__ import print_function
+
from logilab.common.shellutils import generate_password
from cubicweb.server.utils import crypt_password
@@ -5,7 +7,7 @@
salt = user.upassword.getvalue()
if crypt_password('', salt) == salt:
passwd = generate_password()
- print 'setting random password for user %s' % user.login
+ print('setting random password for user %s' % user.login)
user.set_attributes(upassword=passwd)
commit()
--- a/misc/migration/3.21.0_Any.py Fri Sep 11 14:28:06 2015 +0200
+++ b/misc/migration/3.21.0_Any.py Fri Sep 11 14:52:09 2015 +0200
@@ -1,3 +1,5 @@
+from __future__ import print_function
+
from cubicweb.schema import PURE_VIRTUAL_RTYPES
from cubicweb.server.schema2sql import rschema_has_table
@@ -27,7 +29,7 @@
' SELECT eid FROM entities) AS eids' % args,
ask_confirm=False)[0][0]
if count:
- print '%s references %d unknown entities, deleting' % (rschema, count)
+ print('%s references %d unknown entities, deleting' % (rschema, count))
sql('DELETE FROM %(r)s_relation '
'WHERE eid_from IN (SELECT eid_from FROM %(r)s_relation EXCEPT SELECT eid FROM entities)' % args)
sql('DELETE FROM %(r)s_relation '
@@ -54,14 +56,14 @@
broken_eids = sql('SELECT cw_eid FROM cw_%(e)s WHERE cw_%(r)s IS NULL' % args,
ask_confirm=False)
if broken_eids:
- print 'Required relation %(e)s.%(r)s missing' % args
+ print('Required relation %(e)s.%(r)s missing' % args)
args['eids'] = ', '.join(str(eid) for eid, in broken_eids)
rql('DELETE %(e)s X WHERE X eid IN (%(eids)s)' % args)
broken_eids = sql('SELECT cw_eid FROM cw_%(e)s WHERE cw_%(r)s IN (SELECT cw_%(r)s FROM cw_%(e)s '
'EXCEPT SELECT eid FROM entities)' % args,
ask_confirm=False)
if broken_eids:
- print 'Required relation %(e)s.%(r)s references unknown objects, deleting subject entities' % args
+ print('Required relation %(e)s.%(r)s references unknown objects, deleting subject entities' % args)
args['eids'] = ', '.join(str(eid) for eid, in broken_eids)
rql('DELETE %(e)s X WHERE X eid IN (%(eids)s)' % args)
else:
@@ -70,7 +72,7 @@
' EXCEPT'
' SELECT eid FROM entities) AS eids' % args,
ask_confirm=False)[0][0]:
- print '%(e)s.%(r)s references unknown entities, deleting relation' % args
+ print('%(e)s.%(r)s references unknown entities, deleting relation' % args)
sql('UPDATE cw_%(e)s SET cw_%(r)s = NULL WHERE cw_%(r)s IS NOT NULL AND cw_%(r)s IN '
'(SELECT cw_%(r)s FROM cw_%(e)s EXCEPT SELECT eid FROM entities)' % args)
sql('ALTER TABLE cw_%(e)s DROP CONSTRAINT IF EXISTS %(c)s' % args,
@@ -87,7 +89,7 @@
' EXCEPT'
' SELECT eid FROM entities) AS eids' % args,
ask_confirm=False)[0][0]:
- print '%(e)s has nonexistent entities, deleting' % args
+ print('%(e)s has nonexistent entities, deleting' % args)
sql('DELETE FROM cw_%(e)s WHERE cw_eid IN '
'(SELECT cw_eid FROM cw_%(e)s EXCEPT SELECT eid FROM entities)' % args)
sql('ALTER TABLE cw_%(e)s DROP CONSTRAINT IF EXISTS cw_%(e)s_cw_eid_fkey' % args,
--- a/misc/migration/3.8.5_Any.py Fri Sep 11 14:28:06 2015 +0200
+++ b/misc/migration/3.8.5_Any.py Fri Sep 11 14:52:09 2015 +0200
@@ -1,3 +1,5 @@
+from __future__ import print_function
+
def migrate_varchar_to_nvarchar():
dbdriver = config.system_source_config['db-driver']
if dbdriver != "sqlserver2005":
@@ -52,7 +54,7 @@
for statement in generated_statements:
- print statement
+ print(statement)
sql(statement, ask_confirm=False)
commit()
--- a/misc/migration/bootstrapmigration_repository.py Fri Sep 11 14:28:06 2015 +0200
+++ b/misc/migration/bootstrapmigration_repository.py Fri Sep 11 14:52:09 2015 +0200
@@ -19,6 +19,7 @@
it should only include low level schema changes
"""
+from __future__ import print_function
from cubicweb import ConfigurationError
from cubicweb.server.session import hooks_control
@@ -77,8 +78,8 @@
sql('ALTER TABLE "entities" DROP COLUMN "mtime"')
sql('ALTER TABLE "entities" DROP COLUMN "source"')
except: # programming error, already migrated
- print "Failed to drop mtime or source database columns"
- print "'entities' table of the database has probably been already updated"
+ print("Failed to drop mtime or source database columns")
+ print("'entities' table of the database has probably been already updated")
commit()
@@ -101,7 +102,7 @@
driver = config.system_source_config['db-driver']
if not (driver == 'postgres' or driver.startswith('sqlserver')):
import sys
- print >>sys.stderr, 'This migration is not supported for backends other than sqlserver or postgres (yet).'
+ print('This migration is not supported for backends other than sqlserver or postgres (yet).', file=sys.stderr)
sys.exit(1)
add_relation_definition('CWAttribute', 'add_permission', 'CWGroup')
@@ -196,7 +197,7 @@
(rschema.type, ','.join(subjects))))
if martians:
martians = ','.join(martians)
- print 'deleting broken relations %s for eids %s' % (rschema.type, martians)
+ print('deleting broken relations %s for eids %s' % (rschema.type, martians))
sql('DELETE FROM %s_relation WHERE eid_from IN (%s) OR eid_to IN (%s)' % (rschema.type, martians, martians))
with session.deny_all_hooks_but():
rql('SET X %(r)s Y WHERE Y %(r)s X, NOT X %(r)s Y' % {'r': rschema.type})
@@ -219,20 +220,20 @@
if driver == 'postgres':
for indexname, in sql('select indexname from pg_indexes'):
if indexname.startswith('unique_'):
- print 'dropping index', indexname
+ print('dropping index', indexname)
sql('DROP INDEX %s' % indexname)
commit()
elif driver.startswith('sqlserver'):
for viewname, in sql('select name from sys.views'):
if viewname.startswith('utv_'):
- print 'dropping view (index should be cascade-deleted)', viewname
+ print('dropping view (index should be cascade-deleted)', viewname)
sql('DROP VIEW %s' % viewname)
commit()
# recreate the constraints, hook will lead to low-level recreation
for eschema in sorted(schema.entities()):
if eschema._unique_together:
- print 'recreate unique indexes for', eschema
+ print('recreate unique indexes for', eschema)
rql_args = schemaserial.uniquetogether2rqls(eschema)
for rql, args in rql_args:
args['x'] = eschema.eid
@@ -243,10 +244,10 @@
for rschema in sorted(schema.relations()):
if rschema.final:
if rschema.type in fsschema:
- print 'sync perms for', rschema.type
+ print('sync perms for', rschema.type)
sync_schema_props_perms(rschema.type, syncprops=False, ask_confirm=False, commit=False)
else:
- print 'WARNING: attribute %s missing from fs schema' % rschema.type
+ print('WARNING: attribute %s missing from fs schema' % rschema.type)
commit()
if applcubicwebversion < (3, 17, 0) and cubicwebversion >= (3, 17, 0):
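
The hunk above also covers the redirection form: Python 2's `print >>sys.stderr, msg` becomes `print(msg, file=sys.stderr)`. A standalone sketch of that idiom; the `abort` helper and the `driver` value are illustrative, not taken from the patch:

    from __future__ import print_function
    import sys

    def abort(msg, status=1):
        # Python 2 spelled this: print >>sys.stderr, msg
        print(msg, file=sys.stderr)
        sys.exit(status)

    driver = 'sqlite'  # illustrative value
    if driver != 'postgres' and not driver.startswith('sqlserver'):
        abort('This migration is not supported for backends other than '
              'sqlserver or postgres (yet).')
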
--- a/misc/migration/postcreate.py Fri Sep 11 14:28:06 2015 +0200
+++ b/misc/migration/postcreate.py Fri Sep 11 14:52:09 2015 +0200
@@ -16,6 +16,7 @@
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
"""cubicweb post creation script, set user's workflow"""
+from __future__ import print_function
# insert versions
create_entity('CWProperty', pkey=u'system.version.cubicweb',
@@ -46,8 +47,8 @@
if hasattr(config, 'anonymous_user'):
anonlogin, anonpwd = config.anonymous_user()
if anonlogin == session.user.login:
- print 'you are using a manager account as anonymous user.'
- print 'Hopefully this is not a production instance...'
+ print('you are using a manager account as anonymous user.')
+ print('Hopefully this is not a production instance...')
elif anonlogin:
from cubicweb.server import create_user
create_user(session, unicode(anonlogin), anonpwd, u'guests')
--- a/misc/scripts/cwuser_ldap2system.py Fri Sep 11 14:28:06 2015 +0200
+++ b/misc/scripts/cwuser_ldap2system.py Fri Sep 11 14:52:09 2015 +0200
@@ -1,3 +1,5 @@
+from __future__ import print_function
+
import base64
from cubicweb.server.utils import crypt_password
@@ -20,10 +22,10 @@
rset = sql("SELECT eid,type,source,extid,mtime FROM entities WHERE source!='system'", ask_confirm=False)
for eid, type, source, extid, mtime in rset:
if type != 'CWUser':
- print "don't know what to do with entity type", type
+ print("don't know what to do with entity type", type)
continue
if not source.lower().startswith('ldap'):
- print "don't know what to do with source type", source
+ print("don't know what to do with source type", source)
continue
extid = base64.decodestring(extid)
ldapinfos = [x.strip().split('=') for x in extid.split(',')]
@@ -33,7 +35,7 @@
args = dict(eid=eid, type=type, source=source, login=login,
firstname=firstname, surname=surname, mtime=mtime,
pwd=dbhelper.binary_value(crypt_password('toto')))
- print args
+ print(args)
sql(insert, args)
sql(update, args)
--- a/misc/scripts/detect_cycle.py Fri Sep 11 14:28:06 2015 +0200
+++ b/misc/scripts/detect_cycle.py Fri Sep 11 14:52:09 2015 +0200
@@ -1,9 +1,10 @@
+from __future__ import print_function
try:
rtype, = __args__
except ValueError:
- print 'USAGE: cubicweb-ctl shell <instance> detect_cycle.py -- <relation type>'
- print
+ print('USAGE: cubicweb-ctl shell <instance> detect_cycle.py -- <relation type>')
+ print()
graph = {}
for fromeid, toeid in rql('Any X,Y WHERE X %s Y' % rtype):
@@ -12,4 +13,4 @@
from logilab.common.graph import get_cycles
for cycle in get_cycles(graph):
- print 'cycle', '->'.join(str(n) for n in cycle)
+ print('cycle', '->'.join(str(n) for n in cycle))
--- a/misc/scripts/ldap_change_base_dn.py Fri Sep 11 14:28:06 2015 +0200
+++ b/misc/scripts/ldap_change_base_dn.py Fri Sep 11 14:52:09 2015 +0200
@@ -1,10 +1,12 @@
+from __future__ import print_function
+
from base64 import b64decode, b64encode
try:
uri, newdn = __args__
except ValueError:
- print 'USAGE: cubicweb-ctl shell <instance> ldap_change_base_dn.py -- <ldap source uri> <new dn>'
- print
- print 'you should not have updated your sources file yet'
+ print('USAGE: cubicweb-ctl shell <instance> ldap_change_base_dn.py -- <ldap source uri> <new dn>')
+ print()
+ print('you should not have updated your sources file yet')
olddn = repo.sources_by_uri[uri].config['user-base-dn']
@@ -16,9 +18,9 @@
olduserdn = b64decode(extid)
newuserdn = olduserdn.replace(olddn, newdn)
if newuserdn != olduserdn:
- print olduserdn, '->', newuserdn
+ print(olduserdn, '->', newuserdn)
sql("UPDATE entities SET extid='%s' WHERE eid=%s" % (b64encode(newuserdn), eid))
commit()
-print 'you can now update the sources file to the new dn and restart the instance'
+print('you can now update the sources file to the new dn and restart the instance')
--- a/misc/scripts/ldapuser2ldapfeed.py Fri Sep 11 14:28:06 2015 +0200
+++ b/misc/scripts/ldapuser2ldapfeed.py Fri Sep 11 14:52:09 2015 +0200
@@ -2,6 +2,8 @@
Once this script is run, execute c-c db-check to cleanup relation tables.
"""
+from __future__ import print_function
+
import sys
from collections import defaultdict
from logilab.common.shellutils import generate_password
@@ -14,12 +16,12 @@
' on the command line)')
sys.exit(1)
except KeyError:
- print '%s is not an active source' % source_name
+ print('%s is not an active source' % source_name)
sys.exit(1)
# check source is reachable before doing anything
if not source.get_connection().cnx:
- print '%s is not reachable. Fix this before running this script' % source_name
+ print('%s is not reachable. Fix this before running this script' % source_name)
sys.exit(1)
raw_input('Ensure you have shutdown all instances of this application before continuing.'
@@ -31,7 +33,7 @@
from cubicweb.server.edition import EditedEntity
-print '******************** backport entity content ***************************'
+print('******************** backport entity content ***************************')
todelete = defaultdict(list)
extids = set()
@@ -39,17 +41,17 @@
for entity in rql('Any X WHERE X cw_source S, S eid %(s)s', {'s': source.eid}).entities():
etype = entity.cw_etype
if not source.support_entity(etype):
- print "source doesn't support %s, delete %s" % (etype, entity.eid)
+ print("source doesn't support %s, delete %s" % (etype, entity.eid))
todelete[etype].append(entity)
continue
try:
entity.complete()
except Exception:
- print '%s %s much probably deleted, delete it (extid %s)' % (
- etype, entity.eid, entity.cw_metainformation()['extid'])
+        print('%s %s most probably deleted, delete it (extid %s)' % (
+ etype, entity.eid, entity.cw_metainformation()['extid']))
todelete[etype].append(entity)
continue
- print 'get back', etype, entity.eid
+ print('get back', etype, entity.eid)
entity.cw_edited = EditedEntity(entity, **entity.cw_attr_cache)
if not entity.creation_date:
entity.cw_edited['creation_date'] = datetime.now()
@@ -61,7 +63,7 @@
if not entity.cwuri:
entity.cw_edited['cwuri'] = '%s/?dn=%s' % (
source.urls[0], extid.decode('utf-8', 'ignore'))
- print entity.cw_edited
+ print(entity.cw_edited)
if extid in extids:
duplicates.append(extid)
continue
@@ -73,13 +75,13 @@
# only cleanup entities table, remaining stuff should be cleaned by a c-c
# db-check to be run after this script
if duplicates:
- print 'found %s duplicate entries' % len(duplicates)
+ print('found %s duplicate entries' % len(duplicates))
from pprint import pprint
pprint(duplicates)
-print len(todelete), 'entities will be deleted'
+print(len(todelete), 'entities will be deleted')
for etype, entities in todelete.iteritems():
- print 'deleting', etype, [e.login for e in entities]
+ print('deleting', etype, [e.login for e in entities])
system_source.delete_info_multi(session, entities, source_name)
@@ -89,9 +91,9 @@
if raw_input('Commit?') in 'yY':
- print 'committing'
+ print('committing')
commit()
else:
rollback()
- print 'rolled back'
+ print('rolled back')
--- a/misc/scripts/pyroforge2datafeed.py Fri Sep 11 14:28:06 2015 +0200
+++ b/misc/scripts/pyroforge2datafeed.py Fri Sep 11 14:52:09 2015 +0200
@@ -2,6 +2,8 @@
Once this script is run, execute c-c db-check to cleanup relation tables.
"""
+from __future__ import print_function
+
import sys
try:
@@ -12,14 +14,14 @@
' on the command line)')
sys.exit(1)
except KeyError:
- print '%s is not an active source' % source_name
+ print('%s is not an active source' % source_name)
sys.exit(1)
# check source is reachable before doing anything
try:
source.get_connection()._repo
except AttributeError:
- print '%s is not reachable. Fix this before running this script' % source_name
+ print('%s is not reachable. Fix this before running this script' % source_name)
sys.exit(1)
raw_input('Ensure you have shutdown all instances of this application before continuing.'
@@ -39,7 +41,7 @@
))
-print '******************** backport entity content ***************************'
+print('******************** backport entity content ***************************')
from cubicweb.server import debugged
todelete = {}
@@ -47,20 +49,20 @@
for entity in rql('Any X WHERE X cw_source S, S eid %(s)s', {'s': source.eid}).entities():
etype = entity.cw_etype
if not source.support_entity(etype):
- print "source doesn't support %s, delete %s" % (etype, entity.eid)
+ print("source doesn't support %s, delete %s" % (etype, entity.eid))
elif etype in DONT_GET_BACK_ETYPES:
- print 'ignore %s, delete %s' % (etype, entity.eid)
+ print('ignore %s, delete %s' % (etype, entity.eid))
else:
try:
entity.complete()
if not host in entity.cwuri:
- print 'SKIP foreign entity', entity.cwuri, source.config['base-url']
+ print('SKIP foreign entity', entity.cwuri, source.config['base-url'])
continue
except Exception:
- print '%s %s much probably deleted, delete it (extid %s)' % (
- etype, entity.eid, entity.cw_metainformation()['extid'])
+            print('%s %s most probably deleted, delete it (extid %s)' % (
+ etype, entity.eid, entity.cw_metainformation()['extid']))
else:
- print 'get back', etype, entity.eid
+ print('get back', etype, entity.eid)
entity.cw_edited = EditedEntity(entity, **entity.cw_attr_cache)
system_source.add_entity(session, entity)
sql("UPDATE entities SET asource=%(asource)s, source='system', extid=%(extid)s "
@@ -76,7 +78,7 @@
system_source.delete_info_multi(session, entities, source_name)
-print '******************** backport mapping **********************************'
+print('******************** backport mapping **********************************')
session.disable_hook_categories('cw.sources')
mapping = []
for mappart in rql('Any X,SCH WHERE X cw_schema SCH, X cw_for_source S, S eid %(s)s',
@@ -91,7 +93,7 @@
continue
if rdef.subject in DONT_GET_BACK_ETYPES \
and rdef.object in DONT_GET_BACK_ETYPES:
- print 'dont map', rdef
+ print('dont map', rdef)
continue
if rdef.subject in DONT_GET_BACK_ETYPES:
options = u'action=link\nlinkattr=name'
@@ -105,7 +107,7 @@
roles = 'object',
else:
roles = 'subject',
- print 'map', rdef, options, roles
+ print('map', rdef, options, roles)
for role in roles:
mapping.append( (
(str(rdef.subject), str(rdef.rtype), str(rdef.object)),
--- a/misc/scripts/repair_file_1-9_migration.py Fri Sep 11 14:28:06 2015 +0200
+++ b/misc/scripts/repair_file_1-9_migration.py Fri Sep 11 14:52:09 2015 +0200
@@ -4,13 +4,14 @@
* on our intranet on July 07 2010
* on our extranet on July 16 2010
"""
+from __future__ import print_function
try:
backupinstance, = __args__
except ValueError:
- print 'USAGE: cubicweb-ctl shell <instance> repair_file_1-9_migration.py -- <backup instance id>'
- print
- print 'you should restored the backup on a new instance, accessible through pyro'
+ print('USAGE: cubicweb-ctl shell <instance> repair_file_1-9_migration.py -- <backup instance id>')
+ print()
+    print('you should have restored the backup on a new instance, accessible through pyro')
from cubicweb import cwconfig, dbapi
from cubicweb.server.session import hooks_control
@@ -32,20 +33,20 @@
'XX from_entity YY, YY name "File")'):
if rtype in ('is', 'is_instance_of'):
continue
- print rtype
+ print(rtype)
for feid, xeid in backupcu.execute('Any F,X WHERE F %s X, F is IN (File,Image)' % rtype):
- print 'restoring relation %s between file %s and %s' % (rtype, feid, xeid),
- print rql('SET F %s X WHERE F eid %%(f)s, X eid %%(x)s, NOT F %s X' % (rtype, rtype),
- {'f': feid, 'x': xeid})
+ print('restoring relation %s between file %s and %s' % (rtype, feid, xeid), end=' ')
+ print(rql('SET F %s X WHERE F eid %%(f)s, X eid %%(x)s, NOT F %s X' % (rtype, rtype),
+ {'f': feid, 'x': xeid}))
for rtype, in backupcu.execute('DISTINCT Any RTN WHERE X relation_type RT, RT name RTN,'
'X to_entity Y, Y name "Image", X is CWRelation, '
'EXISTS(XX is CWRelation, XX relation_type RT, '
'XX to_entity YY, YY name "File")'):
- print rtype
+ print(rtype)
for feid, xeid in backupcu.execute('Any F,X WHERE X %s F, F is IN (File,Image)' % rtype):
- print 'restoring relation %s between %s and file %s' % (rtype, xeid, feid),
- print rql('SET X %s F WHERE F eid %%(f)s, X eid %%(x)s, NOT X %s F' % (rtype, rtype),
- {'f': feid, 'x': xeid})
+ print('restoring relation %s between %s and file %s' % (rtype, xeid, feid), end=' ')
+ print(rql('SET X %s F WHERE F eid %%(f)s, X eid %%(x)s, NOT X %s F' % (rtype, rtype),
+ {'f': feid, 'x': xeid}))
commit()
--- a/misc/scripts/repair_splitbrain_ldapuser_source.py Fri Sep 11 14:28:06 2015 +0200
+++ b/misc/scripts/repair_splitbrain_ldapuser_source.py Fri Sep 11 14:52:09 2015 +0200
@@ -14,6 +14,7 @@
deciding to apply it for you. And then ADAPT it tou your needs.
"""
+from __future__ import print_function
import base64
from collections import defaultdict
@@ -28,12 +29,12 @@
' on the command line)')
sys.exit(1)
except KeyError:
- print '%s is not an active source' % source_name
+ print('%s is not an active source' % source_name)
sys.exit(1)
# check source is reachable before doing anything
if not source.get_connection().cnx:
- print '%s is not reachable. Fix this before running this script' % source_name
+ print('%s is not reachable. Fix this before running this script' % source_name)
sys.exit(1)
def find_dupes():
@@ -52,11 +53,11 @@
CWUser = schema['CWUser']
for extid, eids in dupes.items():
newest = eids.pop() # we merge everything on the newest
- print 'merging ghosts of', extid, 'into', newest
+ print('merging ghosts of', extid, 'into', newest)
# now we merge pairwise into the newest
for old in eids:
subst = {'old': old, 'new': newest}
- print ' merging', old
+ print(' merging', old)
gone_eids.append(old)
for rschema in CWUser.subject_relations():
if rschema.final or rschema == 'identity':
@@ -83,24 +84,24 @@
rollback()
return
commit() # XXX flushing operations is wanted rather than really committing
- print 'clean up entities table'
+ print('clean up entities table')
sql('DELETE FROM entities WHERE eid IN (%s)' % (', '.join(str(x) for x in gone_eids)))
commit()
def main():
dupes = find_dupes()
if not dupes:
- print 'No duplicate user'
+ print('No duplicate user')
return
- print 'Found %s duplicate user instances' % len(dupes)
+ print('Found %s duplicate user instances' % len(dupes))
while True:
- print 'Fix or dry-run? (f/d) ... or Ctrl-C to break out'
+ print('Fix or dry-run? (f/d) ... or Ctrl-C to break out')
answer = raw_input('> ')
if answer.lower() not in 'fd':
continue
- print 'Please STOP THE APPLICATION INSTANCES (service or interactive), and press Return when done.'
+ print('Please STOP THE APPLICATION INSTANCES (service or interactive), and press Return when done.')
raw_input('<I swear all running instances and workers of the application are stopped>')
with hooks_control(session, session.HOOKS_DENY_ALL):
merge_dupes(dupes, docommit=answer=='f')
--- a/schema.py Fri Sep 11 14:28:06 2015 +0200
+++ b/schema.py Fri Sep 11 14:52:09 2015 +0200
@@ -16,6 +16,7 @@
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
"""classes to define schemas for CubicWeb"""
+from __future__ import print_function
__docformat__ = "restructuredtext en"
_ = unicode
@@ -665,7 +666,7 @@
groups = self.get_groups(action)
if _cw.user.matching_groups(groups):
if DBG:
- print ('check_perm: %r %r: user matches %s' % (action, _self_str, groups))
+ print('check_perm: %r %r: user matches %s' % (action, _self_str, groups))
return
# if 'owners' in allowed groups, check if the user actually owns this
# object, if so that's enough
@@ -676,14 +677,14 @@
kwargs.get('creating')
or ('eid' in kwargs and _cw.user.owns(kwargs['eid']))):
if DBG:
- print ('check_perm: %r %r: user is owner or creation time' %
- (action, _self_str))
+ print('check_perm: %r %r: user is owner or creation time' %
+ (action, _self_str))
return
# else if there is some rql expressions, check them
if DBG:
- print ('check_perm: %r %r %s' %
- (action, _self_str, [(rqlexpr, kwargs, rqlexpr.check(_cw, **kwargs))
- for rqlexpr in self.get_rqlexprs(action)]))
+ print('check_perm: %r %r %s' %
+ (action, _self_str, [(rqlexpr, kwargs, rqlexpr.check(_cw, **kwargs))
+ for rqlexpr in self.get_rqlexprs(action)]))
if any(rqlexpr.check(_cw, **kwargs)
for rqlexpr in self.get_rqlexprs(action)):
return
--- a/server/__init__.py Fri Sep 11 14:28:06 2015 +0200
+++ b/server/__init__.py Fri Sep 11 14:52:09 2015 +0200
@@ -20,6 +20,7 @@
The server module contains functions to initialize a new repository.
"""
+from __future__ import print_function
__docformat__ = "restructuredtext en"
@@ -245,7 +246,7 @@
remainings = filter(drop_filter, helper.list_tables(sqlcursor))
assert not remainings, 'Remaining tables: %s' % ', '.join(remainings)
_title = '-> creating tables '
- print _title,
+ print(_title, end=' ')
# schema entities and relations tables
# can't skip entities table even if system source doesn't support them,
# they are used sometimes by generated sql. Keeping them empty is much
@@ -255,8 +256,8 @@
# if not repo.system_source.support_entity(str(e))])
failed = sqlexec(schemasql, execute, pbtitle=_title, delimiter=';;')
if failed:
- print 'The following SQL statements failed. You should check your schema.'
- print failed
+ print('The following SQL statements failed. You should check your schema.')
+ print(failed)
raise Exception('execution of the sql schema failed, you should check your schema')
sqlcursor.close()
sqlcnx.commit()
@@ -267,7 +268,7 @@
repo.system_source.eid = ssource.eid
cnx.execute('SET X cw_source X WHERE X eid %(x)s', {'x': ssource.eid})
# insert base groups and default admin
- print '-> inserting default user and default groups.'
+ print('-> inserting default user and default groups.')
try:
login = unicode(sourcescfg['admin']['login'])
pwd = sourcescfg['admin']['password']
@@ -310,7 +311,7 @@
# (drop instance attribute to get back to class attribute)
del config.cubicweb_appobject_path
del config.cube_appobject_path
- print '-> database for instance %s initialized.' % config.appid
+ print('-> database for instance %s initialized.' % config.appid)
def initialize_schema(config, schema, mhandler, event='create'):
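
Hunks that previously ended the statement with a trailing comma to stay on the same line (such as `print _title,` above) now pass `end=' '` to the function instead. A small sketch of the equivalence, assuming the future import is in place:

    from __future__ import print_function

    _title = '-> creating tables '
    # Python 2 used a trailing comma (`print _title,`) to suppress the newline;
    # the end keyword does the same for the function form.
    print(_title, end=' ')
    print('done')  # continues on the same line rather than below
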
--- a/server/checkintegrity.py Fri Sep 11 14:28:06 2015 +0200
+++ b/server/checkintegrity.py Fri Sep 11 14:52:09 2015 +0200
@@ -20,6 +20,8 @@
* integrity of a CubicWeb repository. Hum actually only the system database is
checked.
"""
+from __future__ import print_function
+
__docformat__ = "restructuredtext en"
import sys
@@ -90,11 +92,11 @@
dbhelper = repo.system_source.dbhelper
cursor = cnx.cnxset.cu
if not dbhelper.has_fti_table(cursor):
- print 'no text index table'
+ print('no text index table')
dbhelper.init_fti(cursor)
repo.system_source.do_fti = True # ensure full-text indexation is activated
if etypes is None:
- print 'Reindexing entities'
+ print('Reindexing entities')
etypes = set()
for eschema in schema.entities():
if eschema.final:
@@ -107,8 +109,8 @@
# clear fti table first
cnx.system_sql('DELETE FROM %s' % dbhelper.fti_table)
else:
- print 'Reindexing entities of type %s' % \
- ', '.join(sorted(str(e) for e in etypes))
+            print('Reindexing entities of type %s' %
+ ', '.join(sorted(str(e) for e in etypes)))
# clear fti table first. Use subquery for sql compatibility
cnx.system_sql("DELETE FROM %s WHERE EXISTS(SELECT 1 FROM ENTITIES "
"WHERE eid=%s AND type IN (%s))" % (
@@ -135,7 +137,7 @@
def check_schema(schema, cnx, eids, fix=1):
"""check serialized schema"""
- print 'Checking serialized schema'
+ print('Checking serialized schema')
unique_constraints = ('SizeConstraint', 'FormatConstraint',
'VocabularyConstraint',
'RQLVocabularyConstraint')
@@ -147,16 +149,16 @@
if count == 1:
continue
if cstrname in unique_constraints:
- print "ERROR: got %s %r constraints on relation %s.%s.%s" % (
- count, cstrname, sn, rn, on)
+ print("ERROR: got %s %r constraints on relation %s.%s.%s" % (
+ count, cstrname, sn, rn, on))
if fix:
- print 'dunno how to fix, do it yourself'
+ print('dunno how to fix, do it yourself')
def check_text_index(schema, cnx, eids, fix=1):
"""check all entities registered in the text index"""
- print 'Checking text index'
+ print('Checking text index')
msg = ' Entity with eid %s exists in the text index but in no source (autofix will remove from text index)'
cursor = cnx.system_sql('SELECT uid FROM appears;')
for row in cursor.fetchall():
@@ -170,7 +172,7 @@
def check_entities(schema, cnx, eids, fix=1):
"""check all entities registered in the repo system table"""
- print 'Checking entities system table'
+ print('Checking entities system table')
# system table but no source
msg = ' Entity %s with eid %s exists in the system table but in no source (autofix will delete the entity)'
cursor = cnx.system_sql('SELECT eid,type FROM entities;')
@@ -228,7 +230,7 @@
'WHERE s.cw_name=e.type AND NOT EXISTS(SELECT 1 FROM is_instance_of_relation as cs '
' WHERE cs.eid_from=e.eid AND cs.eid_to=s.cw_eid)')
notify_fixed(True)
- print 'Checking entities tables'
+ print('Checking entities tables')
msg = ' Entity with eid %s exists in the %s table but not in the system table (autofix will delete the entity)'
for eschema in schema.entities():
if eschema.final:
@@ -263,7 +265,7 @@
"""check that eids referenced by relations are registered in the repo system
table
"""
- print 'Checking relations'
+ print('Checking relations')
for rschema in schema.relations():
if rschema.final or rschema.type in PURE_VIRTUAL_RTYPES:
continue
@@ -287,7 +289,7 @@
cursor = cnx.system_sql('SELECT eid_from FROM %s_relation;' % rschema)
except Exception as ex:
# usually because table doesn't exist
- print 'ERROR', ex
+ print('ERROR', ex)
continue
for row in cursor.fetchall():
eid = row[0]
@@ -310,7 +312,7 @@
def check_mandatory_relations(schema, cnx, eids, fix=1):
"""check entities missing some mandatory relation"""
- print 'Checking mandatory relations'
+ print('Checking mandatory relations')
msg = '%s #%s is missing mandatory %s relation %s (autofix will delete the entity)'
for rschema in schema.relations():
if rschema.final or rschema in PURE_VIRTUAL_RTYPES or rschema in ('is', 'is_instance_of'):
@@ -340,7 +342,7 @@
"""check for entities stored in the system source missing some mandatory
attribute
"""
- print 'Checking mandatory attributes'
+ print('Checking mandatory attributes')
msg = '%s #%s is missing mandatory attribute %s (autofix will delete the entity)'
for rschema in schema.relations():
if not rschema.final or rschema in VIRTUAL_RTYPES:
@@ -361,7 +363,7 @@
FIXME: rewrite using RQL queries ?
"""
- print 'Checking metadata'
+ print('Checking metadata')
cursor = cnx.system_sql("SELECT DISTINCT type FROM entities;")
eidcolumn = SQL_PREFIX + 'eid'
msg = ' %s with eid %s has no %s (autofix will set it to now)'
@@ -403,9 +405,9 @@
if fix:
cnx.commit()
else:
- print
+ print()
if not fix:
- print 'WARNING: Diagnostic run, nothing has been corrected'
+ print('WARNING: Diagnostic run, nothing has been corrected')
if reindex:
cnx.rollback()
reindex_entities(repo.schema, cnx, withpb=withpb)
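
A detail visible in the reindexing hunk above: the backslash continuation that the old print statement required is redundant once the arguments live inside the call's parentheses, so the line can simply wrap. A minimal sketch with placeholder data:

    from __future__ import print_function

    etypes = ['CWUser', 'CWGroup']  # placeholder entity type names
    # The open parenthesis keeps the expression continued; no trailing backslash needed.
    print('Reindexing entities of type %s' %
          ', '.join(sorted(str(e) for e in etypes)))
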
--- a/server/hook.py Fri Sep 11 14:28:06 2015 +0200
+++ b/server/hook.py Fri Sep 11 14:52:09 2015 +0200
@@ -248,6 +248,8 @@
.. autoclass:: cubicweb.server.hook.LateOperation
.. autoclass:: cubicweb.server.hook.DataOperationMixIn
"""
+from __future__ import print_function
+
__docformat__ = "restructuredtext en"
from warnings import warn
@@ -331,7 +333,7 @@
with cnx.running_hooks_ops():
for hook in hooks:
if debug:
- print event, _kwargs, hook
+ print(event, _kwargs, hook)
hook()
def get_pruned_hooks(self, cnx, event, entities, eids_from_to, kwargs):
--- a/server/migractions.py Fri Sep 11 14:28:06 2015 +0200
+++ b/server/migractions.py Fri Sep 11 14:52:09 2015 +0200
@@ -26,6 +26,8 @@
* add an entity
* execute raw RQL queries
"""
+from __future__ import print_function
+
__docformat__ = "restructuredtext en"
import sys
@@ -137,18 +139,18 @@
try:
self.cnx = repoapi.connect(self.repo, login, password=pwd)
if not 'managers' in self.cnx.user.groups:
- print 'migration need an account in the managers group'
+                print('migration needs an account in the managers group')
else:
break
except AuthenticationError:
- print 'wrong user/password'
+ print('wrong user/password')
except (KeyboardInterrupt, EOFError):
- print 'aborting...'
+ print('aborting...')
sys.exit(0)
try:
login, pwd = manager_userpasswd()
except (KeyboardInterrupt, EOFError):
- print 'aborting...'
+ print('aborting...')
sys.exit(0)
self.session = self.repo._get_session(self.cnx.sessionid)
@@ -199,10 +201,10 @@
# check backup has to be done
if osp.exists(backupfile) and not \
self.confirm('Backup file %s exists, overwrite it?' % backupfile):
- print '-> no backup done.'
+ print('-> no backup done.')
return
elif askconfirm and not self.confirm('Backup %s database?' % config.appid):
- print '-> no backup done.'
+ print('-> no backup done.')
return
open(backupfile,'w').close() # kinda lock
os.chmod(backupfile, 0600)
@@ -214,7 +216,7 @@
try:
source.backup(osp.join(tmpdir, source.uri), self.confirm, format=format)
except Exception as ex:
- print '-> error trying to backup %s [%s]' % (source.uri, ex)
+ print('-> error trying to backup %s [%s]' % (source.uri, ex))
if not self.confirm('Continue anyway?', default='n'):
raise SystemExit(1)
else:
@@ -233,7 +235,7 @@
# call hooks
repo.hm.call_hooks('server_backup', repo=repo, timestamp=timestamp)
# done
- print '-> backup file', backupfile
+ print('-> backup file', backupfile)
finally:
shutil.rmtree(tmpdir)
@@ -270,14 +272,14 @@
try:
source.restore(osp.join(tmpdir, source.uri), self.confirm, drop, format)
except Exception as exc:
- print '-> error trying to restore %s [%s]' % (source.uri, exc)
+ print('-> error trying to restore %s [%s]' % (source.uri, exc))
if not self.confirm('Continue anyway?', default='n'):
raise SystemExit(1)
shutil.rmtree(tmpdir)
# call hooks
repo.init_cnxset_pool()
repo.hm.call_hooks('server_restore', repo=repo, timestamp=backupfile)
- print '-> database restored.'
+ print('-> database restored.')
def commit(self):
self.cnx.commit()
@@ -359,11 +361,11 @@
directory = osp.join(self.config.cube_dir(cube), 'schema')
sql_scripts = glob(osp.join(directory, '*.%s.sql' % driver))
for fpath in sql_scripts:
- print '-> installing', fpath
+ print('-> installing', fpath)
failed = sqlexec(open(fpath).read(), self.cnx.system_sql, False,
delimiter=';;')
if failed:
- print '-> ERROR, skipping', fpath
+ print('-> ERROR, skipping', fpath)
# schema synchronization internals ########################################
@@ -547,12 +549,12 @@
for name in cols:
rschema = repoeschema.subjrels.get(name)
if rschema is None:
- print 'dont add %s unique constraint on %s, missing %s' % (
- ','.join(cols), eschema, name)
+            print('don\'t add %s unique constraint on %s, missing %s' % (
+ ','.join(cols), eschema, name))
return False
if not (rschema.final or rschema.inlined):
- print 'dont add %s unique constraint on %s, %s is neither final nor inlined' % (
- ','.join(cols), eschema, name)
+            print('don\'t add %s unique constraint on %s, %s is neither final nor inlined' % (
+ ','.join(cols), eschema, name))
return False
return True
@@ -737,8 +739,8 @@
rschema = self.repo.schema.rschema(attrname)
attrtype = rschema.objects(etype)[0]
except KeyError:
- print 'warning: attribute %s %s is not known, skip deletion' % (
- etype, attrname)
+ print('warning: attribute %s %s is not known, skip deletion' % (
+ etype, attrname))
else:
self.cmd_drop_relation_definition(etype, attrname, attrtype,
commit=commit)
@@ -775,7 +777,7 @@
instschema = self.repo.schema
eschema = self.fs_schema.eschema(etype)
if etype in instschema and not (eschema.final and eschema.eid is None):
- print 'warning: %s already known, skip addition' % etype
+ print('warning: %s already known, skip addition' % etype)
return
confirm = self.verbosity >= 2
groupmap = self.group_mapping()
@@ -912,7 +914,7 @@
"""
schema = self.repo.schema
if oldname not in schema:
- print 'warning: entity type %s is unknown, skip renaming' % oldname
+ print('warning: entity type %s is unknown, skip renaming' % oldname)
return
# if merging two existing entity types
if newname in schema:
@@ -1011,8 +1013,8 @@
rschema = self.fs_schema.rschema(rtype)
execute = self.cnx.execute
if rtype in reposchema:
- print 'warning: relation type %s is already known, skip addition' % (
- rtype)
+ print('warning: relation type %s is already known, skip addition' % (
+ rtype))
elif rschema.rule:
gmap = self.group_mapping()
ss.execschemarql(execute, rschema, ss.crschema2rql(rschema, gmap))
@@ -1091,8 +1093,8 @@
if not rtype in self.repo.schema:
self.cmd_add_relation_type(rtype, addrdef=False, commit=True)
if (subjtype, objtype) in self.repo.schema.rschema(rtype).rdefs:
- print 'warning: relation %s %s %s is already known, skip addition' % (
- subjtype, rtype, objtype)
+ print('warning: relation %s %s %s is already known, skip addition' % (
+ subjtype, rtype, objtype))
return
rdef = self._get_rdef(rschema, subjtype, objtype)
ss.execschemarql(self.cnx.execute, rdef,
@@ -1344,7 +1346,7 @@
# remove from entity cache to avoid memory exhaustion
del entity.cw_attr_cache[attribute]
pb.update()
- print
+ print()
source.set_storage(etype, attribute, storage)
def cmd_create_entity(self, etype, commit=False, **kwargs):
--- a/server/querier.py Fri Sep 11 14:28:06 2015 +0200
+++ b/server/querier.py Fri Sep 11 14:52:09 2015 +0200
@@ -18,6 +18,8 @@
"""Helper classes to execute RQL queries on a set of sources, performing
security checking and data aggregation.
"""
+from __future__ import print_function
+
__docformat__ = "restructuredtext en"
from itertools import repeat
@@ -103,13 +105,13 @@
solution, args))
if not user.matching_groups(rdef.get_groups('read')):
if DBG:
- print ('check_read_access: %s %s does not match %s' %
- (rdef, user.groups, rdef.get_groups('read')))
+ print('check_read_access: %s %s does not match %s' %
+ (rdef, user.groups, rdef.get_groups('read')))
# XXX rqlexpr not allowed
raise Unauthorized('read', rel.r_type)
if DBG:
- print ('check_read_access: %s %s matches %s' %
- (rdef, user.groups, rdef.get_groups('read')))
+ print('check_read_access: %s %s matches %s' %
+ (rdef, user.groups, rdef.get_groups('read')))
def get_local_checks(cnx, rqlst, solution):
"""Check that the given user has credentials to access data read by the
@@ -138,8 +140,8 @@
ex = Unauthorized('read', solution[varname])
ex.var = varname
if DBG:
- print ('check_read_access: %s %s %s %s' %
- (varname, eschema, user.groups, eschema.get_groups('read')))
+ print('check_read_access: %s %s %s %s' %
+ (varname, eschema, user.groups, eschema.get_groups('read')))
raise ex
# don't insert security on variable only referenced by 'NOT X relation Y' or
# 'NOT EXISTS(X relation Y)'
@@ -539,8 +541,8 @@
"""
if server.DEBUG & (server.DBG_RQL | server.DBG_SQL):
if server.DEBUG & (server.DBG_MORE | server.DBG_SQL):
- print '*'*80
- print 'querier input', repr(rql), repr(args)
+ print('*'*80)
+ print('querier input', repr(rql), repr(args))
# parse the query and binds variables
cachekey = (rql,)
try:
--- a/server/repository.py Fri Sep 11 14:28:06 2015 +0200
+++ b/server/repository.py Fri Sep 11 14:52:09 2015 +0200
@@ -25,6 +25,8 @@
point to a cubicweb instance.
* handles session management
"""
+from __future__ import print_function
+
__docformat__ = "restructuredtext en"
import threading
@@ -912,7 +914,7 @@
# set caches asap
extid = self.init_entity_caches(cnx, entity, source)
if server.DEBUG & server.DBG_REPO:
- print 'ADD entity', self, entity.cw_etype, entity.eid, edited
+ print('ADD entity', self, entity.cw_etype, entity.eid, edited)
prefill_entity_caches(entity)
self.hm.call_hooks('before_add_entity', cnx, entity=entity)
relations = preprocess_inlined_relations(cnx, entity)
@@ -943,8 +945,8 @@
"""
entity = edited.entity
if server.DEBUG & server.DBG_REPO:
- print 'UPDATE entity', entity.cw_etype, entity.eid, \
- entity.cw_attr_cache, edited
+ print('UPDATE entity', entity.cw_etype, entity.eid,
+ entity.cw_attr_cache, edited)
hm = self.hm
eschema = entity.e_schema
cnx.set_entity_cache(entity)
@@ -1038,7 +1040,7 @@
source = self.system_source
for etype, entities in data_by_etype.iteritems():
if server.DEBUG & server.DBG_REPO:
- print 'DELETE entities', etype, [entity.eid for entity in entities]
+ print('DELETE entities', etype, [entity.eid for entity in entities])
self.hm.call_hooks('before_delete_entity', cnx, entities=entities)
self._delete_cascade_multi(cnx, entities)
source.delete_entities(cnx, entities)
@@ -1063,7 +1065,7 @@
for rtype, eids_subj_obj in relations.iteritems():
if server.DEBUG & server.DBG_REPO:
for subjeid, objeid in eids_subj_obj:
- print 'ADD relation', subjeid, rtype, objeid
+ print('ADD relation', subjeid, rtype, objeid)
for subjeid, objeid in eids_subj_obj:
if rtype in relations_by_rtype:
relations_by_rtype[rtype].append((subjeid, objeid))
@@ -1113,7 +1115,7 @@
def glob_delete_relation(self, cnx, subject, rtype, object):
"""delete a relation from the repository"""
if server.DEBUG & server.DBG_REPO:
- print 'DELETE relation', subject, rtype, object
+ print('DELETE relation', subject, rtype, object)
source = self.system_source
self.hm.call_hooks('before_delete_relation', cnx,
eidfrom=subject, rtype=rtype, eidto=object)
--- a/server/rqlannotation.py Fri Sep 11 14:28:06 2015 +0200
+++ b/server/rqlannotation.py Fri Sep 11 14:52:09 2015 +0200
@@ -18,6 +18,7 @@
"""Functions to add additional annotations on a rql syntax tree to ease later
code generation.
"""
+from __future__ import print_function
__docformat__ = "restructuredtext en"
@@ -354,9 +355,9 @@
continue
def _debug_print(self):
- print 'varsols', dict((x, sorted(str(v) for v in values))
- for x, values in self.varsols.iteritems())
- print 'ambiguous vars', sorted(self.ambiguousvars)
+ print('varsols', dict((x, sorted(str(v) for v in values))
+ for x, values in self.varsols.iteritems()))
+ print('ambiguous vars', sorted(self.ambiguousvars))
def set_rel_constraint(self, term, rel, etypes_func):
if isinstance(term, VariableRef) and self.is_ambiguous(term.variable):
--- a/server/schemaserial.py Fri Sep 11 14:28:06 2015 +0200
+++ b/server/schemaserial.py Fri Sep 11 14:52:09 2015 +0200
@@ -16,6 +16,7 @@
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
"""functions for schema / permissions (de)serialization using RQL"""
+from __future__ import print_function
__docformat__ = "restructuredtext en"
@@ -49,11 +50,11 @@
return res
missing = [g for g in ('owners', 'managers', 'users', 'guests') if not g in res]
if missing:
- print 'some native groups are missing but the following groups have been found:'
- print '\n'.join('* %s (%s)' % (n, eid) for n, eid in res.items())
- print
- print 'enter the eid of a to group to map to each missing native group'
- print 'or just type enter to skip permissions granted to a group'
+ print('some native groups are missing but the following groups have been found:')
+ print('\n'.join('* %s (%s)' % (n, eid) for n, eid in res.items()))
+ print()
+        print('enter the eid of a group to map to each missing native group')
+ print('or just type enter to skip permissions granted to a group')
for group in missing:
while True:
value = raw_input('eid for group %s: ' % group).strip()
@@ -62,13 +63,13 @@
try:
eid = int(value)
except ValueError:
- print 'eid should be an integer'
+ print('eid should be an integer')
continue
for eid_ in res.values():
if eid == eid_:
break
else:
- print 'eid is not a group eid'
+ print('eid is not a group eid')
continue
res[name] = eid
break
@@ -344,7 +345,7 @@
current schema
"""
_title = '-> storing the schema in the database '
- print _title,
+ print(_title, end=' ')
execute = cnx.execute
eschemas = schema.entities()
pb_size = (len(eschemas + schema.relations())
@@ -397,7 +398,7 @@
for rql, kwargs in specialize2rql(schema):
execute(rql, kwargs, build_descr=False)
pb.update()
- print
+ print()
# high level serialization functions
@@ -455,8 +456,8 @@
columnset = set()
for columns in eschema._unique_together:
if columns in columnset:
- print ('schemaserial: skipping duplicate unique together %r %r' %
- (eschema.type, columns))
+ print('schemaserial: skipping duplicate unique together %r %r' %
+ (eschema.type, columns))
continue
columnset.add(columns)
rql, args = _uniquetogether2rql(eschema, columns)
@@ -619,8 +620,8 @@
yield ('SET X %s_permission Y WHERE Y eid %%(g)s, X eid %%(x)s' % action,
{'g': groupmap[group_or_rqlexpr]})
except KeyError:
- print ("WARNING: group %s used in permissions for %s was ignored because it doesn't exist."
- " You may want to add it into a precreate.py file" % (group_or_rqlexpr, erschema))
+ print("WARNING: group %s used in permissions for %s was ignored because it doesn't exist."
+ " You may want to add it into a precreate.py file" % (group_or_rqlexpr, erschema))
continue
else:
# rqlexpr
--- a/server/serverconfig.py Fri Sep 11 14:28:06 2015 +0200
+++ b/server/serverconfig.py Fri Sep 11 14:52:09 2015 +0200
@@ -16,6 +16,7 @@
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
"""server.serverconfig definition"""
+from __future__ import print_function
__docformat__ = "restructuredtext en"
@@ -276,7 +277,7 @@
assert len(self.sources_mode) == 1
if source.connect_for_migration:
return True
- print 'not connecting to source', source.uri, 'during migration'
+ print('not connecting to source', source.uri, 'during migration')
return False
if 'all' in self.sources_mode:
assert len(self.sources_mode) == 1
--- a/server/serverctl.py Fri Sep 11 14:28:06 2015 +0200
+++ b/server/serverctl.py Fri Sep 11 14:52:09 2015 +0200
@@ -16,6 +16,7 @@
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
"""cubicweb-ctl commands and command handlers specific to the repository"""
+from __future__ import print_function
__docformat__ = 'restructuredtext en'
@@ -55,25 +56,25 @@
driver = source['db-driver']
dbhelper = get_db_helper(driver)
if interactive:
- print '-> connecting to %s database' % driver,
+ print('-> connecting to %s database' % driver, end=' ')
if dbhost:
- print '%s@%s' % (dbname, dbhost),
+ print('%s@%s' % (dbname, dbhost), end=' ')
else:
- print dbname,
+ print(dbname, end=' ')
if dbhelper.users_support:
if not interactive or (not special_privs and source.get('db-user')):
user = source.get('db-user', os.environ.get('USER', ''))
if interactive:
- print 'as', user
+ print('as', user)
password = source.get('db-password')
else:
- print
+ print()
if special_privs:
- print 'WARNING'
+ print('WARNING')
print ('the user will need the following special access rights '
'on the database:')
- print special_privs
- print
+ print(special_privs)
+ print()
default_user = source.get('db-user', os.environ.get('USER', ''))
user = raw_input('Connect as user ? [%r]: ' % default_user)
user = user.strip() or default_user
@@ -146,7 +147,7 @@
cnx = repoapi.connect(repo, login, password=pwd)
return repo, cnx
except AuthenticationError:
- print '-> Error: wrong user/password.'
+ print('-> Error: wrong user/password.')
# reset cubes else we'll have an assertion error on next retry
config._cubes = None
login, pwd = manager_userpasswd()
@@ -164,9 +165,9 @@
"""
config = self.config
if not automatic:
- print underline_title('Configuring the repository')
+ print(underline_title('Configuring the repository'))
config.input_config('email', inputlevel)
- print '\n'+underline_title('Configuring the sources')
+ print('\n'+underline_title('Configuring the sources'))
sourcesfile = config.sources_file()
# hack to make Method('default_instance_id') usable in db option defs
# (in native.py)
@@ -174,12 +175,12 @@
options=SOURCE_TYPES['native'].options)
if not automatic:
sconfig.input_config(inputlevel=inputlevel)
- print
+ print()
sourcescfg = {'system': sconfig}
if automatic:
# XXX modify a copy
password = generate_password()
- print '-> set administrator account to admin / %s' % password
+ print('-> set administrator account to admin / %s' % password)
USER_OPTIONS[1][1]['default'] = password
sconfig = Configuration(options=USER_OPTIONS)
else:
@@ -197,8 +198,8 @@
CWCTL.run(['db-create', '--config-level', str(inputlevel),
self.config.appid])
else:
- print ('-> nevermind, you can do it later with '
- '"cubicweb-ctl db-create %s".' % self.config.appid)
+ print('-> nevermind, you can do it later with '
+ '"cubicweb-ctl db-create %s".' % self.config.appid)
@contextmanager
@@ -242,26 +243,26 @@
with db_transaction(source, privilege='DROP SCHEMA') as cursor:
helper = get_db_helper(source['db-driver'])
helper.drop_schema(cursor, db_namespace)
- print '-> database schema %s dropped' % db_namespace
+ print('-> database schema %s dropped' % db_namespace)
def _drop_database(self, source):
dbname = source['db-name']
if source['db-driver'] == 'sqlite':
- print 'deleting database file %(db-name)s' % source
+ print('deleting database file %(db-name)s' % source)
os.unlink(source['db-name'])
- print '-> database %(db-name)s dropped.' % source
+ print('-> database %(db-name)s dropped.' % source)
else:
helper = get_db_helper(source['db-driver'])
with db_sys_transaction(source, privilege='DROP DATABASE') as cursor:
- print 'dropping database %(db-name)s' % source
+ print('dropping database %(db-name)s' % source)
cursor.execute('DROP DATABASE "%(db-name)s"' % source)
- print '-> database %(db-name)s dropped.' % source
+ print('-> database %(db-name)s dropped.' % source)
def _drop_user(self, source):
user = source['db-user'] or None
if user is not None:
with db_sys_transaction(source, privilege='DROP USER') as cursor:
- print 'dropping user %s' % user
+ print('dropping user %s' % user)
cursor.execute('DROP USER %s' % user)
def _cleanup_steps(self, source):
@@ -288,7 +289,7 @@
try:
step(source)
except Exception as exc:
- print 'ERROR', exc
+ print('ERROR', exc)
if ASK.confirm('An error occurred. Continue anyway?',
default_is_yes=False):
continue
@@ -357,7 +358,7 @@
ASK.confirm('Database %s already exists. Drop it?' % dbname)):
os.unlink(dbname)
elif self.config.create_db:
- print '\n'+underline_title('Creating the system database')
+ print('\n'+underline_title('Creating the system database'))
# connect on the dbms system base to create our base
dbcnx = _db_sys_cnx(source, 'CREATE/DROP DATABASE and / or USER',
interactive=not automatic)
@@ -368,17 +369,17 @@
if not helper.user_exists(cursor, user) and (automatic or \
ASK.confirm('Create db user %s ?' % user, default_is_yes=False)):
helper.create_user(source['db-user'], source.get('db-password'))
- print '-> user %s created.' % user
+ print('-> user %s created.' % user)
if dbname in helper.list_databases(cursor):
if automatic or ASK.confirm('Database %s already exists -- do you want to drop it ?' % dbname):
cursor.execute('DROP DATABASE "%s"' % dbname)
else:
- print ('you may want to run "cubicweb-ctl db-init '
- '--drop %s" manually to continue.' % config.appid)
+ print('you may want to run "cubicweb-ctl db-init '
+ '--drop %s" manually to continue.' % config.appid)
return
createdb(helper, source, dbcnx, cursor)
dbcnx.commit()
- print '-> database %s created.' % dbname
+ print('-> database %s created.' % dbname)
except BaseException:
dbcnx.rollback()
raise
@@ -400,13 +401,13 @@
try:
helper.create_language(cursor, extlang)
except Exception as exc:
- print '-> ERROR:', exc
- print '-> could not create language %s, some stored procedures might be unusable' % extlang
+ print('-> ERROR:', exc)
+ print('-> could not create language %s, some stored procedures might be unusable' % extlang)
cnx.rollback()
else:
cnx.commit()
- print '-> database for instance %s created and necessary extensions installed.' % appid
- print
+ print('-> database for instance %s created and necessary extensions installed.' % appid)
+ print()
if automatic:
CWCTL.run(['db-init', '--automatic', '--config-level', '0',
config.appid])
@@ -414,8 +415,8 @@
CWCTL.run(['db-init', '--config-level',
str(self.config.config_level), config.appid])
else:
- print ('-> nevermind, you can do it later with '
- '"cubicweb-ctl db-init %s".' % config.appid)
+ print('-> nevermind, you can do it later with '
+ '"cubicweb-ctl db-init %s".' % config.appid)
class InitInstanceCommand(Command):
@@ -452,7 +453,7 @@
def run(self, args):
check_options_consistency(self.config)
- print '\n'+underline_title('Initializing the system database')
+ print('\n'+underline_title('Initializing the system database'))
from cubicweb.server import init_repository
appid = args[0]
config = ServerConfiguration.config_for(appid)
@@ -506,7 +507,7 @@
type = raw_input('source type (%s): '
% ', '.join(sorted(SOURCE_TYPES)))
if type not in SOURCE_TYPES:
- print '-> unknown source type, use one of the available types.'
+ print('-> unknown source type, use one of the available types.')
continue
sourcemodule = SOURCE_TYPES[type].module
if not sourcemodule.startswith('cubicweb.'):
@@ -524,16 +525,16 @@
% ', '.join(sorted(repo.vreg['parsers'])))
if parser in repo.vreg['parsers']:
break
- print '-> unknown parser identifier, use one of the available types.'
+ print('-> unknown parser identifier, use one of the available types.')
while True:
sourceuri = raw_input('source identifier (a unique name used to '
'tell sources apart): ').strip()
if not sourceuri:
- print '-> mandatory.'
+ print('-> mandatory.')
else:
sourceuri = unicode(sourceuri, sys.stdin.encoding)
if sourceuri in used:
- print '-> uri already used, choose another one.'
+ print('-> uri already used, choose another one.')
else:
break
url = raw_input('source URL (leave empty for none): ').strip()
@@ -583,10 +584,10 @@
cnx.rollback()
import traceback
traceback.print_exc()
- print '-> an error occurred:', ex
+ print('-> an error occurred:', ex)
else:
cnx.commit()
- print '-> rights granted to %s on instance %s.' % (appid, user)
+ print('-> rights granted to %s on instance %s.' % (appid, user))
class ResetAdminPasswordCommand(Command):
@@ -617,7 +618,7 @@
try:
adminlogin = sourcescfg['admin']['login']
except KeyError:
- print '-> Error: could not get cubicweb administrator login.'
+ print('-> Error: could not get cubicweb administrator login.')
sys.exit(1)
cnx = source_cnx(sourcescfg['system'])
driver = sourcescfg['system']['db-driver']
@@ -627,9 +628,9 @@
cursor.execute("SELECT * FROM cw_CWUser WHERE cw_login=%(l)s",
{'l': adminlogin})
if not cursor.fetchall():
- print ("-> error: admin user %r specified in sources doesn't exist "
- "in the database" % adminlogin)
- print " fix your sources file before running this command"
+ print("-> error: admin user %r specified in sources doesn't exist "
+ "in the database" % adminlogin)
+ print(" fix your sources file before running this command")
cnx.close()
sys.exit(1)
if self.config.password is None:
@@ -650,10 +651,10 @@
cnx.rollback()
import traceback
traceback.print_exc()
- print '-> an error occurred:', ex
+ print('-> an error occurred:', ex)
else:
cnx.commit()
- print '-> password reset, sources file regenerated.'
+ print('-> password reset, sources file regenerated.')
cnx.close()
@@ -666,17 +667,17 @@
if sudo:
dmpcmd = 'sudo %s' % (dmpcmd)
dmpcmd = 'ssh -t %s "%s"' % (host, dmpcmd)
- print dmpcmd
+ print(dmpcmd)
if os.system(dmpcmd):
raise ExecutionError('Error while dumping the database')
if output is None:
output = filename
cmd = 'scp %s:/tmp/%s %s' % (host, filename, output)
- print cmd
+ print(cmd)
if os.system(cmd):
raise ExecutionError('Error while retrieving the dump at /tmp/%s' % filename)
rmcmd = 'ssh -t %s "rm -f /tmp/%s"' % (host, filename)
- print rmcmd
+ print(rmcmd)
if os.system(rmcmd) and not ASK.confirm(
'An error occurred while deleting remote dump at /tmp/%s. '
'Continue anyway?' % filename):
@@ -701,23 +702,23 @@
dbversions = repo.get_versions()
mih.shutdown()
if not dbversions:
- print "bad or missing version information in the database, don't upgrade file system"
+ print("bad or missing version information in the database, don't upgrade file system")
return
# version of installed software
eversion = dbversions['cubicweb']
status = instance_status(config, eversion, dbversions)
# * database version > installed software
if status == 'needsoftupgrade':
- print "** The database of %s is more recent than the installed software!" % config.appid
- print "** Upgrade your software, then migrate the database by running the command"
- print "** 'cubicweb-ctl upgrade %s'" % config.appid
+ print("** The database of %s is more recent than the installed software!" % config.appid)
+ print("** Upgrade your software, then migrate the database by running the command")
+ print("** 'cubicweb-ctl upgrade %s'" % config.appid)
return
# * database version < installed software, an upgrade will be necessary
# anyway, just rewrite vc.conf and warn user he has to upgrade
elif status == 'needapplupgrade':
- print "** The database of %s is older than the installed software." % config.appid
- print "** Migrate the database by running the command"
- print "** 'cubicweb-ctl upgrade %s'" % config.appid
+ print("** The database of %s is older than the installed software." % config.appid)
+ print("** Migrate the database by running the command")
+ print("** 'cubicweb-ctl upgrade %s'" % config.appid)
return
# * database version = installed software, database version = instance fs version
# ok!
@@ -732,12 +733,12 @@
try:
softversion = config.cube_version(cube)
except ConfigurationError:
- print '-> Error: no cube version information for %s, please check that the cube is installed.' % cube
+ print('-> Error: no cube version information for %s, please check that the cube is installed.' % cube)
continue
try:
applversion = vcconf[cube]
except KeyError:
- print '-> Error: no cube version information for %s in version configuration.' % cube
+ print('-> Error: no cube version information for %s in version configuration.' % cube)
continue
if softversion == applversion:
continue
@@ -883,7 +884,7 @@
_local_restore(destappid, output, not self.config.no_drop,
self.config.format)
if self.config.keep_dump:
- print '-> you can get the dump file at', output
+ print('-> you can get the dump file at', output)
else:
os.remove(output)
@@ -1003,7 +1004,7 @@
repo.shutdown()
for key, val in stats.iteritems():
if val:
- print key, ':', val
+ print(key, ':', val)
--- a/server/session.py Fri Sep 11 14:28:06 2015 +0200
+++ b/server/session.py Fri Sep 11 14:52:09 2015 +0200
@@ -16,6 +16,8 @@
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
"""Repository users' and internal' sessions."""
+from __future__ import print_function
+
__docformat__ = "restructuredtext en"
import sys
@@ -874,7 +876,7 @@
processed = []
self.commit_state = 'precommit'
if debug:
- print self.commit_state, '*' * 20
+ print(self.commit_state, '*' * 20)
try:
with self.running_hooks_ops():
while self.pending_operations:
@@ -882,7 +884,7 @@
operation.processed = 'precommit'
processed.append(operation)
if debug:
- print operation
+ print(operation)
operation.handle_event('precommit_event')
self.pending_operations[:] = processed
self.debug('precommit transaction %s done', self.connectionid)
@@ -899,11 +901,11 @@
# and revertcommit, that will be enough in mont case.
operation.failed = True
if debug:
- print self.commit_state, '*' * 20
+ print(self.commit_state, '*' * 20)
with self.running_hooks_ops():
for operation in reversed(processed):
if debug:
- print operation
+ print(operation)
try:
operation.handle_event('revertprecommit_event')
except BaseException:
@@ -917,12 +919,12 @@
self.cnxset.commit()
self.commit_state = 'postcommit'
if debug:
- print self.commit_state, '*' * 20
+ print(self.commit_state, '*' * 20)
with self.running_hooks_ops():
while self.pending_operations:
operation = self.pending_operations.pop(0)
if debug:
- print operation
+ print(operation)
operation.processed = 'postcommit'
try:
operation.handle_event('postcommit_event')
--- a/server/sources/__init__.py Fri Sep 11 14:28:06 2015 +0200
+++ b/server/sources/__init__.py Fri Sep 11 14:52:09 2015 +0200
@@ -16,6 +16,7 @@
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
"""cubicweb server sources support"""
+from __future__ import print_function
__docformat__ = "restructuredtext en"
@@ -36,25 +37,25 @@
def dbg_st_search(uri, union, varmap, args, cachekey=None, prefix='rql for'):
if server.DEBUG & server.DBG_RQL:
global t
- print ' %s %s source: %s' % (prefix, uri, repr(union.as_string()))
+ print(' %s %s source: %s' % (prefix, uri, repr(union.as_string())))
t = time()
if varmap:
- print ' using varmap', varmap
+ print(' using varmap', varmap)
if server.DEBUG & server.DBG_MORE:
- print ' args', repr(args)
- print ' cache key', cachekey
- print ' solutions', ','.join(str(s.solutions)
- for s in union.children)
+ print(' args', repr(args))
+ print(' cache key', cachekey)
+ print(' solutions', ','.join(str(s.solutions)
+ for s in union.children))
# return true so it can be used as assertion (and so be killed by python -O)
return True
def dbg_results(results):
if server.DEBUG & server.DBG_RQL:
if len(results) > 10:
- print ' -->', results[:10], '...', len(results),
+ print(' -->', results[:10], '...', len(results), end=' ')
else:
- print ' -->', results,
- print 'time: ', time() - t
+ print(' -->', results, end=' ')
+ print('time: ', time() - t)
# return true so it can be used as assertion (and so be killed by python -O)
return True
--- a/server/sources/native.py Fri Sep 11 14:28:06 2015 +0200
+++ b/server/sources/native.py Fri Sep 11 14:52:09 2015 +0200
@@ -23,6 +23,8 @@
string. This is because it should actually be Bytes but we want an index on
it for fast querying.
"""
+from __future__ import print_function
+
__docformat__ = "restructuredtext en"
from cPickle import loads, dumps
@@ -76,12 +78,12 @@
it's a function just so that it shows up in profiling
"""
if server.DEBUG & server.DBG_SQL:
- print 'exec', query, args
+ print('exec', query, args)
try:
self.cu.execute(str(query), args)
except Exception as ex:
- print "sql: %r\n args: %s\ndbms message: %r" % (
- query, args, ex.args[0])
+ print("sql: %r\n args: %s\ndbms message: %r" % (
+ query, args, ex.args[0]))
raise
def fetchall(self):
@@ -708,7 +710,7 @@
"""
cursor = cnx.cnxset.cu
if server.DEBUG & server.DBG_SQL:
- print 'exec', query, args, cnx.cnxset.cnx
+ print('exec', query, args, cnx.cnxset.cnx)
try:
# str(query) to avoid error if it's a unicode string
cursor.execute(str(query), args)
@@ -767,7 +769,7 @@
it's a function just so that it shows up in profiling
"""
if server.DEBUG & server.DBG_SQL:
- print 'execmany', query, 'with', len(args), 'arguments', cnx.cnxset.cnx
+ print('execmany', query, 'with', len(args), 'arguments', cnx.cnxset.cnx)
cursor = cnx.cnxset.cu
try:
# str(query) to avoid error if it's a unicode string
--- a/server/sqlutils.py Fri Sep 11 14:28:06 2015 +0200
+++ b/server/sqlutils.py Fri Sep 11 14:52:09 2015 +0200
@@ -16,6 +16,7 @@
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
"""SQL utilities functions and classes."""
+from __future__ import print_function
__docformat__ = "restructuredtext en"
@@ -43,7 +44,7 @@
SQL_PREFIX = 'cw_'
def _run_command(cmd):
- print ' '.join(cmd)
+ print(' '.join(cmd))
return subprocess.call(cmd)
@@ -94,7 +95,7 @@
if cnx:
cnx.commit()
if withpb:
- print
+ print()
if sqlstmts_as_string:
failed = delimiter.join(failed)
return failed
--- a/server/test/unittest_ldapsource.py Fri Sep 11 14:28:06 2015 +0200
+++ b/server/test/unittest_ldapsource.py Fri Sep 11 14:52:09 2015 +0200
@@ -16,6 +16,7 @@
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
"""cubicweb.server.sources.ldapusers unit and functional tests"""
+from __future__ import print_function
import os
import sys
@@ -63,8 +64,8 @@
slapproc = subprocess.Popen(cmdline, stdout=PIPE, stderr=PIPE)
stdout, stderr = slapproc.communicate()
if slapproc.returncode:
- print >> sys.stderr, ('slapadd returned with status: %s'
- % slapproc.returncode)
+ print('slapadd returned with status: %s'
+ % slapproc.returncode, file=sys.stderr)
sys.stdout.write(stdout)
sys.stderr.write(stderr)
@@ -96,8 +97,8 @@
os.kill(cls.slapd_process.pid, signal.SIGTERM)
stdout, stderr = cls.slapd_process.communicate()
if cls.slapd_process.returncode:
- print >> sys.stderr, ('slapd returned with status: %s'
- % cls.slapd_process.returncode)
+ print('slapd returned with status: %s'
+ % cls.slapd_process.returncode, file=sys.stderr)
sys.stdout.write(stdout)
sys.stderr.write(stderr)
config.info('DONE')
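
The two hunks above also cover the redirection form: `print >> sys.stderr, ...` has no function-call equivalent other than the `file` keyword of the print function. A minimal sketch, with a placeholder return code:

    from __future__ import print_function
    import sys

    returncode = 1  # placeholder value
    # Python 2 wrote: print >> sys.stderr, 'slapadd returned with status: %s' % returncode
    print('slapadd returned with status: %s' % returncode, file=sys.stderr)
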
--- a/server/test/unittest_rql2sql.py Fri Sep 11 14:28:06 2015 +0200
+++ b/server/test/unittest_rql2sql.py Fri Sep 11 14:52:09 2015 +0200
@@ -16,6 +16,7 @@
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
"""unit tests for module cubicweb.server.sources.rql2sql"""
+from __future__ import print_function
import sys
import os
@@ -1246,13 +1247,13 @@
except Exception as ex:
if 'r' in locals():
try:
- print (r%args).strip()
+ print((r%args).strip())
except KeyError:
- print 'strange, missing substitution'
- print r, nargs
- print '!='
- print sql.strip()
- print 'RQL:', rql
+ print('strange, missing substitution')
+ print(r, nargs)
+ print('!=')
+ print(sql.strip())
+ print('RQL:', rql)
raise
def _parse(self, rqls):
@@ -1269,11 +1270,11 @@
r, args, cbs = self.o.generate(rqlst, args)
self.assertEqual((r.strip(), args), sql)
except Exception as ex:
- print rql
+ print(rql)
if 'r' in locals():
- print r.strip()
- print '!='
- print sql[0].strip()
+ print(r.strip())
+ print('!=')
+ print(sql[0].strip())
raise
return
--- a/server/utils.py Fri Sep 11 14:28:06 2015 +0200
+++ b/server/utils.py Fri Sep 11 14:52:09 2015 +0200
@@ -16,6 +16,7 @@
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
"""Some utilities for the CubicWeb server."""
+from __future__ import print_function
__docformat__ = "restructuredtext en"
@@ -92,7 +93,7 @@
passwdmsg='password'):
if not user:
if msg:
- print msg
+ print(msg)
while not user:
user = raw_input('login: ')
user = unicode(user, sys.stdin.encoding)
@@ -102,7 +103,7 @@
passwd2 = getpass('confirm password: ')
if passwd == passwd2:
break
- print 'password doesn\'t match'
+ print('password doesn\'t match')
passwd = getpass('password: ')
# XXX decode password using stdin encoding then encode it using appl'encoding
return user, passwd
--- a/toolsutils.py Fri Sep 11 14:28:06 2015 +0200
+++ b/toolsutils.py Fri Sep 11 14:52:09 2015 +0200
@@ -16,6 +16,7 @@
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
"""some utilities for cubicweb command line tools"""
+from __future__ import print_function
__docformat__ = "restructuredtext en"
@@ -62,29 +63,29 @@
"""create a directory if it doesn't exist yet"""
try:
makedirs(directory)
- print '-> created directory %s' % directory
+ print('-> created directory %s' % directory)
except OSError as ex:
import errno
if ex.errno != errno.EEXIST:
raise
- print '-> no need to create existing directory %s' % directory
+ print('-> no need to create existing directory %s' % directory)
def create_symlink(source, target):
"""create a symbolic link"""
if exists(target):
remove(target)
symlink(source, target)
- print '[symlink] %s <-- %s' % (target, source)
+ print('[symlink] %s <-- %s' % (target, source))
def create_copy(source, target):
import shutil
- print '[copy] %s <-- %s' % (target, source)
+ print('[copy] %s <-- %s' % (target, source))
shutil.copy2(source, target)
def rm(whatever):
import shutil
shutil.rmtree(whatever)
- print '-> removed %s' % whatever
+ print('-> removed %s' % whatever)
def show_diffs(appl_file, ref_file, askconfirm=True):
"""interactivly replace the old file with the new file according to
@@ -95,8 +96,8 @@
diffs = pipe.stdout.read()
if diffs:
if askconfirm:
- print
- print diffs
+ print()
+ print(diffs)
action = ASK.ask('Replace ?', ('Y', 'n', 'q'), 'Y').lower()
else:
action = 'y'
@@ -106,7 +107,7 @@
except IOError:
os.system('chmod a+w %s' % appl_file)
shutil.copyfile(ref_file, appl_file)
- print 'replaced'
+ print('replaced')
elif action == 'q':
sys.exit(0)
else:
@@ -114,9 +115,9 @@
copy = file(copy_file, 'w')
copy.write(open(ref_file).read())
copy.close()
- print 'keep current version, the new file has been written to', copy_file
+ print('keep current version, the new file has been written to', copy_file)
else:
- print 'no diff between %s and %s' % (appl_file, ref_file)
+ print('no diff between %s and %s' % (appl_file, ref_file))
SKEL_EXCLUDE = ('*.py[co]', '*.orig', '*~', '*_flymake.py')
def copy_skeleton(skeldir, targetdir, context,
@@ -143,7 +144,7 @@
if not askconfirm or not exists(tfpath) or \
ASK.confirm('%s exists, overwrite?' % tfpath):
fill_templated_file(fpath, tfpath, context)
- print '[generate] %s <-- %s' % (tfpath, fpath)
+ print('[generate] %s <-- %s' % (tfpath, fpath))
elif exists(tfpath):
show_diffs(tfpath, fpath, askconfirm)
else:
@@ -160,7 +161,7 @@
if log:
log('set permissions to 0600 for %s', filepath)
else:
- print '-> set permissions to 0600 for %s' % filepath
+ print('-> set permissions to 0600 for %s' % filepath)
chmod(filepath, 0600)
def read_config(config_file, raise_if_unreadable=False):
@@ -234,7 +235,7 @@
raise ConfigurationError(msg)
def fail(self, reason):
- print "command failed:", reason
+ print("command failed:", reason)
sys.exit(1)
--- a/web/test/unittest_viewselector.py Fri Sep 11 14:28:06 2015 +0200
+++ b/web/test/unittest_viewselector.py Fri Sep 11 14:52:09 2015 +0200
@@ -17,6 +17,7 @@
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
"""XXX rename, split, reorganize this"""
+from __future__ import print_function
from logilab.common.testlib import unittest_main
@@ -76,9 +77,9 @@
try:
self.assertSetEqual(list(content), expected)
except Exception:
- print registry, sorted(expected), sorted(content)
- print 'no more', [v for v in expected if not v in content]
- print 'missing', [v for v in content if not v in expected]
+ print(registry, sorted(expected), sorted(content))
+ print('no more', [v for v in expected if not v in content])
+ print('missing', [v for v in content if not v in expected])
raise
def setUp(self):
@@ -461,7 +462,7 @@
obj = self.vreg['views'].select(vid, req, rset=rset, **args)
return obj.render(**args)
except Exception:
- print vid, rset, args
+ print(vid, rset, args)
raise
def test_form(self):
--- a/web/webctl.py Fri Sep 11 14:28:06 2015 +0200
+++ b/web/webctl.py Fri Sep 11 14:52:09 2015 +0200
@@ -18,6 +18,7 @@
"""cubicweb-ctl commands and command handlers common to twisted/modpython
web configuration
"""
+from __future__ import print_function
__docformat__ = "restructuredtext en"
@@ -44,7 +45,7 @@
def bootstrap(self, cubes, automatic=False, inputlevel=0):
"""bootstrap this configuration"""
if not automatic:
- print '\n' + underline_title('Generic web configuration')
+ print('\n' + underline_title('Generic web configuration'))
config = self.config
config.input_config('web', inputlevel)
if ASK.confirm('Allow anonymous access ?', False):
@@ -87,8 +88,8 @@
copy(osp.join(resource_dir, resource_path), dest_resource)
# handle md5 version subdirectory
linkdir(dest, osp.join(dest, config.instance_md5_version()))
- print ('You can use apache rewrite rule below :\n'
- 'RewriteRule ^/data/(.*) %s/$1 [L]' % dest)
+        print('You can use the Apache rewrite rule below:\n'
+ 'RewriteRule ^/data/(.*) %s/$1 [L]' % dest)
def _datadirs(self, config, repo=None):
if repo is None: