--- a/cwctl.py Fri Jul 24 17:03:32 2009 +0200
+++ b/cwctl.py Fri Jul 24 17:50:41 2009 +0200
@@ -313,6 +313,7 @@
# create the additional data directory for this instance
if config.appdatahome != config.apphome: # true in dev mode
create_dir(config.appdatahome)
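+            # backup files will be stored in <appdatahome>/backup
+            # (see backup_file() in server/sources/__init__.py)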
+ create_dir(join(config.appdatahome, 'backup'))
if config['uid']:
from logilab.common.shellutils import chown
# this directory should be owned by the uid of the server process
--- a/misc/migration/3.4.0_common.py Fri Jul 24 17:03:32 2009 +0200
+++ b/misc/migration/3.4.0_common.py Fri Jul 24 17:50:41 2009 +0200
@@ -1,1 +1,6 @@
+from os.path import join
+from cubicweb.toolsutils import create_dir
+
option_renamed('pyro-application-id', 'pyro-instance-id')
+
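+# make sure the new per-source backup directory exists for existing instances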
+create_dir(join(config.appdatahome, 'backup'))
--- a/server/hooksmanager.py Fri Jul 24 17:03:32 2009 +0200
+++ b/server/hooksmanager.py Fri Jul 24 17:50:41 2009 +0200
@@ -34,7 +34,8 @@
'before_delete_entity', 'after_delete_entity')
RELATIONS_HOOKS = ('before_add_relation', 'after_add_relation' ,
'before_delete_relation','after_delete_relation')
-SYSTEM_HOOKS = ('server_startup', 'server_shutdown',
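+# server_backup / server_restore are called around a database backup / restore
+# with a `timestamp` keyword argument (see server/migractions.py)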
+SYSTEM_HOOKS = ('server_backup', 'server_restore',
+ 'server_startup', 'server_shutdown',
'session_open', 'session_close')
ALL_HOOKS = frozenset(ENTITIES_HOOKS + RELATIONS_HOOKS + SYSTEM_HOOKS)
@@ -220,9 +221,9 @@
'%s: events is expected to be a tuple, not %s' % (
cls, type(cls.events))
for event in cls.events:
- if event == 'server_startup':
+ if event in SYSTEM_HOOKS:
assert not cls.accepts or cls.accepts == ('Any',), \
- '%s doesnt make sense on server_startup' % cls.accepts
+                    "%s doesn't make sense on %s" % (cls.accepts, event)
cls.accepts = ('Any',)
for ertype in cls.accepts:
if (event, ertype) in done:
--- a/server/migractions.py Fri Jul 24 17:03:32 2009 +0200
+++ b/server/migractions.py Fri Jul 24 17:50:41 2009 +0200
@@ -109,61 +109,26 @@
def backup_database(self, backupfile=None, askconfirm=True):
config = self.config
- source = config.sources()['system']
- helper = get_adv_func_helper(source['db-driver'])
- date = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
- app = config.appid
- backupfile = backupfile or join(config.backup_dir(),
- '%s-%s.dump' % (app, date))
- if exists(backupfile):
- if not self.confirm('a backup already exists for %s, overwrite it?' % app):
- return
- elif askconfirm and not self.confirm('backup %s database?' % app):
- return
- cmd = helper.backup_command(source['db-name'], source.get('db-host'),
- source.get('db-user'), backupfile,
- keepownership=False)
- while True:
- print cmd
- if os.system(cmd):
- print 'error while backuping the base'
- answer = self.confirm('continue anyway?',
- shell=False, abort=False, retry=True)
- if not answer:
- raise SystemExit(1)
- if answer == 1: # 1: continue, 2: retry
- break
- else:
- from cubicweb.toolsutils import restrict_perms_to_user
- print 'database backup:', backupfile
- restrict_perms_to_user(backupfile, self.info)
- break
+ repo = self.repo_connect()
+ timestamp = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
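+        # each source dumps its own data; per-source file names are derived
+        # from the instance id, this timestamp and the source uri
+        # (see backup_file() in server/sources/__init__.py)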
+ for source in repo.sources:
+ source.backup(self.confirm, backupfile, timestamp,
+ askconfirm=askconfirm)
+ repo.hm.call_hooks('server_backup', repo=repo, timestamp=timestamp)
- def restore_database(self, backupfile, drop=True):
+ def restore_database(self, backupfile, drop=True, systemonly=True,
+ askconfirm=True):
config = self.config
- source = config.sources()['system']
- helper = get_adv_func_helper(source['db-driver'])
- app = config.appid
- if not exists(backupfile):
- raise Exception("backup file %s doesn't exist" % backupfile)
- if self.confirm('restore %s database from %s ?' % (app, backupfile)):
- for cmd in helper.restore_commands(source['db-name'], source.get('db-host'),
- source.get('db-user'), backupfile,
- source['db-encoding'],
- keepownership=False, drop=drop):
- while True:
- print cmd
- if os.system(cmd):
- print 'error while restoring the base'
- answer = self.confirm('continue anyway?',
- shell=False, abort=False, retry=True)
- if not answer:
- raise SystemExit(1)
- if answer == 1: # 1: continue, 2: retry
- break
- else:
- break
- print 'database restored'
+ repo = self.repo_connect()
+ if systemonly:
+ repo.system_source.restore(self.confirm, backupfile=backupfile,
+ drop=drop, askconfirm=askconfirm)
+ else:
+            # in that case, backupfile is expected to be the backup's timestamp
+            for source in repo.sources:
+                source.restore(self.confirm, timestamp=backupfile, drop=drop,
+                               askconfirm=askconfirm)
+ repo.hm.call_hooks('server_restore', repo=repo, timestamp=backupfile)
@property
def cnx(self):
--- a/server/repository.py Fri Jul 24 17:03:32 2009 +0200
+++ b/server/repository.py Fri Jul 24 17:50:41 2009 +0200
@@ -217,8 +217,11 @@
# close initialization pool and reopen fresh ones for proper
# initialization now that we know cubes
self._get_pool().close(True)
+        # also keep a list of all pools, since we can't iterate over a Queue
+        # instance (sources use it to close/reopen their pooled connections)
+ self.pools = []
for i in xrange(config['connections-pool-size']):
- self._available_pools.put_nowait(ConnectionsPool(self.sources))
+ self.pools.append(ConnectionsPool(self.sources))
+ self._available_pools.put_nowait(self.pools[-1])
self._shutting_down = False
if not (config.creating or config.repairing):
# call instance level initialisation hooks
--- a/server/serverconfig.py Fri Jul 24 17:03:32 2009 +0200
+++ b/server/serverconfig.py Fri Jul 24 17:50:41 2009 +0200
@@ -211,11 +211,6 @@
"""instance schema directory"""
return env_path('CW_SCHEMA_LIB', cls.SCHEMAS_LIB_DIR, 'schemas')
- @classmethod
- def backup_dir(cls):
- """backup directory where a stored db backups before migration"""
- return env_path('CW_BACKUP', cls.BACKUP_DIR, 'run time')
-
def bootstrap_cubes(self):
from logilab.common.textutils import get_csv
for line in file(join(self.apphome, 'bootstrap_cubes')):
@@ -325,9 +320,11 @@
clear_cache(self, 'sources')
def migration_handler(self, schema=None, interactive=True,
- cnx=None, repo=None, connect=True):
+ cnx=None, repo=None, connect=True, verbosity=None):
"""return a migration handler instance"""
from cubicweb.server.migractions import ServerMigrationHelper
+ if verbosity is None:
+ verbosity = getattr(self, 'verbosity', 0)
return ServerMigrationHelper(self, schema, interactive=interactive,
cnx=cnx, repo=repo, connect=connect,
- verbosity=getattr(self, 'verbosity', 0))
+ verbosity=verbosity)
--- a/server/serverctl.py Fri Jul 24 17:03:32 2009 +0200
+++ b/server/serverctl.py Fri Jul 24 17:50:41 2009 +0200
@@ -5,7 +5,7 @@
:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
:license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
"""
-__docformat__ = "restructuredtext en"
+__docformat__ = 'restructuredtext en'
import sys
import os
@@ -169,7 +169,7 @@
config.write_bootstrap_cubes_file(cubes)
def postcreate(self):
- if confirm('Do you want to run db-create to create the "system database" ?'):
+ if confirm('Do you want to run db-create to create the system database ?'):
verbosity = (self.config.mode == 'installed') and 'y' or 'n'
cmd_run('db-create', self.config.appid, '--verbose=%s' % verbosity)
else:
@@ -246,11 +246,11 @@
arguments = '<instance>'
options = (
- ("create-db",
- {'short': 'c', 'type': "yn", 'metavar': '<y or n>',
+ ('create-db',
+ {'short': 'c', 'type': 'yn', 'metavar': '<y or n>',
'default': True,
'help': 'create the database (yes by default)'}),
- ("verbose",
+ ('verbose',
{'short': 'v', 'type' : 'yn', 'metavar': '<verbose>',
'default': 'n',
'help': 'verbose mode: will ask all possible configuration questions',
@@ -262,14 +262,14 @@
from logilab.common.adbh import get_adv_func_helper
from indexer import get_indexer
verbose = self.get('verbose')
- appid = pop_arg(args, msg="No instance specified !")
+ appid = pop_arg(args, msg='No instance specified !')
config = ServerConfiguration.config_for(appid)
create_db = self.config.create_db
source = config.sources()['system']
driver = source['db-driver']
helper = get_adv_func_helper(driver)
if create_db:
- print '\n'+underline_title('Creating the "system database"')
+ print '\n'+underline_title('Creating the system database')
# connect on the dbms system base to create our base
dbcnx = _db_sys_cnx(source, 'CREATE DATABASE and / or USER', verbose=verbose)
cursor = dbcnx.cursor()
@@ -310,7 +310,7 @@
cnx.commit()
print '-> database for instance %s created and necessary extensions installed.' % appid
print
- if confirm('Do you want to run db-init to initialize the "system database" ?'):
+ if confirm('Do you want to run db-init to initialize the system database ?'):
cmd_run('db-init', config.appid)
else:
print ('-> nevermind, you can do it later with '
@@ -331,7 +331,7 @@
arguments = '<instance>'
options = (
- ("drop",
+ ('drop',
{'short': 'd', 'action': 'store_true',
'default': False,
'help': 'insert drop statements to remove previously existant \
@@ -339,9 +339,9 @@
)
def run(self, args):
- print '\n'+underline_title('Initializing the "system database"')
+ print '\n'+underline_title('Initializing the system database')
from cubicweb.server import init_repository
- appid = pop_arg(args, msg="No instance specified !")
+ appid = pop_arg(args, msg='No instance specified !')
config = ServerConfiguration.config_for(appid)
init_repository(config, drop=self.config.drop)
@@ -358,8 +358,8 @@
arguments = '<instance> <user>'
options = (
- ("set-owner",
- {'short': 'o', 'type' : "yn", 'metavar' : '<yes or no>',
+ ('set-owner',
+ {'short': 'o', 'type' : 'yn', 'metavar' : '<yes or no>',
'default' : False,
'help': 'Set the user as tables owner if yes (no by default).'}
),
@@ -367,8 +367,8 @@
def run(self, args):
"""run the command with its specific arguments"""
from cubicweb.server.sqlutils import sqlexec, sqlgrants
- appid = pop_arg(args, 1, msg="No instance specified !")
- user = pop_arg(args, msg="No user specified !")
+ appid = pop_arg(args, 1, msg='No instance specified !')
+ user = pop_arg(args, msg='No user specified !')
config = ServerConfiguration.config_for(appid)
source = config.sources()['system']
set_owner = self.config.set_owner
@@ -400,7 +400,7 @@
"""run the command with its specific arguments"""
from cubicweb.server.sqlutils import sqlexec, SQL_PREFIX
from cubicweb.server.utils import crypt_password, manager_userpasswd
- appid = pop_arg(args, 1, msg="No instance specified !")
+ appid = pop_arg(args, 1, msg='No instance specified !')
config = ServerConfiguration.config_for(appid)
sourcescfg = config.read_sources_file()
try:
@@ -444,14 +444,14 @@
arguments = '<instance>'
options = (
- ("debug",
+ ('debug',
{'short': 'D', 'action' : 'store_true',
'help': 'start server in debug mode.'}),
)
def run(self, args):
from cubicweb.server.server import RepositoryServer
- appid = pop_arg(args, msg="No instance specified !")
+ appid = pop_arg(args, msg='No instance specified !')
config = ServerConfiguration.config_for(appid)
debug = self.config.debug
# create the server
@@ -473,6 +473,7 @@
def _remote_dump(host, appid, output, sudo=False):
+ # XXX generate unique/portable file name
dmpcmd = 'cubicweb-ctl db-dump -o /tmp/%s.dump %s' % (appid, appid)
if sudo:
dmpcmd = 'sudo %s' % (dmpcmd)
@@ -497,14 +498,16 @@
def _local_dump(appid, output):
config = ServerConfiguration.config_for(appid)
# schema=1 to avoid unnecessary schema loading
- mih = config.migration_handler(connect=False, schema=1)
+ mih = config.migration_handler(connect=False, schema=1, verbosity=1)
mih.backup_database(output, askconfirm=False)
+ mih.shutdown()
-def _local_restore(appid, backupfile, drop):
+def _local_restore(appid, backupfile, drop, systemonly=True):
config = ServerConfiguration.config_for(appid)
+ config.verbosity = 1 # else we won't be asked for confirmation on problems
# schema=1 to avoid unnecessary schema loading
- mih = config.migration_handler(connect=False, schema=1)
- mih.restore_database(backupfile, drop)
+ mih = config.migration_handler(connect=False, schema=1, verbosity=1)
+ mih.restore_database(backupfile, drop, systemonly, askconfirm=False)
repo = mih.repo_connect()
# version of the database
dbversions = repo.get_versions()
@@ -542,12 +545,12 @@
try:
softversion = config.cube_version(cube)
except ConfigurationError:
- print "-> Error: no cube version information for %s, please check that the cube is installed." % cube
+ print '-> Error: no cube version information for %s, please check that the cube is installed.' % cube
continue
try:
applversion = vcconf[cube]
except KeyError:
- print "-> Error: no cube version information for %s in version configuration." % cube
+ print '-> Error: no cube version information for %s in version configuration.' % cube
continue
if softversion == applversion:
continue
@@ -569,8 +572,8 @@
arguments = '<instance>'
options = (
- ("output",
- {'short': 'o', 'type' : "string", 'metavar' : '<file>',
+ ('output',
+ {'short': 'o', 'type' : 'string', 'metavar' : '<file>',
'default' : None,
'help': 'Specify the backup file where the backup will be stored.'}
),
@@ -582,7 +585,7 @@
)
def run(self, args):
- appid = pop_arg(args, 1, msg="No instance specified !")
+ appid = pop_arg(args, 1, msg='No instance specified !')
if ':' in appid:
host, appid = appid.split(':')
_remote_dump(host, appid, self.config.output, self.config.sudo)
@@ -600,18 +603,26 @@
arguments = '<instance> <backupfile>'
options = (
- ("no-drop",
- {'short': 'n', 'action' : 'store_true',
- 'default' : False,
+ ('no-drop',
+ {'short': 'n', 'action' : 'store_true', 'default' : False,
'help': 'for some reason the database doesn\'t exist and so '
'should not be dropped.'}
),
+ ('restore-all',
+ {'short': 'r', 'action' : 'store_true', 'default' : False,
+          'help': 'restore everything, i.e. not only the system source '
+          'database but also data for all sources supporting backup/restore '
+          'and custom instance data. In that case, <backupfile> is expected '
+          'to be the timestamp of the backup to restore, not a file path.'}
+ ),
)
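+    # illustrative usage (assuming this command is registered as 'db-restore'
+    # and the instance is named 'myapp'):
+    #
+    #   cubicweb-ctl db-restore myapp /path/to/myapp-<timestamp>-system.dump
+    #   cubicweb-ctl db-restore --restore-all myapp <timestamp>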
def run(self, args):
- appid = pop_arg(args, 1, msg="No instance specified !")
- backupfile = pop_arg(args, msg="No backup file specified !")
- _local_restore(appid, backupfile, not self.config.no_drop)
+ appid = pop_arg(args, 1, msg='No instance specified !')
+ backupfile = pop_arg(args, msg='No backup file or timestamp specified !')
+ _local_restore(appid, backupfile,
+ drop=not self.config.no_drop,
+ systemonly=not self.config.restore_all)
class DBCopyCommand(Command):
@@ -628,13 +639,13 @@
arguments = '<src-instance> <dest-instance>'
options = (
- ("no-drop",
+ ('no-drop',
{'short': 'n', 'action' : 'store_true',
'default' : False,
'help': 'For some reason the database doesn\'t exist and so '
'should not be dropped.'}
),
- ("keep-dump",
+ ('keep-dump',
{'short': 'k', 'action' : 'store_true',
'default' : False,
'help': 'Specify that the dump file should not be automatically removed.'}
@@ -648,8 +659,8 @@
def run(self, args):
import tempfile
- srcappid = pop_arg(args, 1, msg="No source instance specified !")
- destappid = pop_arg(args, msg="No destination instance specified !")
+ srcappid = pop_arg(args, 1, msg='No source instance specified !')
+ destappid = pop_arg(args, msg='No destination instance specified !')
_, output = tempfile.mkstemp()
if ':' in srcappid:
host, srcappid = srcappid.split(':')
@@ -673,27 +684,27 @@
arguments = '<instance>'
options = (
- ("checks",
- {'short': 'c', 'type' : "csv", 'metavar' : '<check list>',
+ ('checks',
+ {'short': 'c', 'type' : 'csv', 'metavar' : '<check list>',
'default' : ('entities', 'relations', 'metadata', 'schema', 'text_index'),
'help': 'Comma separated list of check to run. By default run all \
checks, i.e. entities, relations, text_index and metadata.'}
),
- ("autofix",
- {'short': 'a', 'type' : "yn", 'metavar' : '<yes or no>',
+ ('autofix',
+ {'short': 'a', 'type' : 'yn', 'metavar' : '<yes or no>',
'default' : False,
'help': 'Automatically correct integrity problems if this option \
is set to "y" or "yes", else only display them'}
),
- ("reindex",
- {'short': 'r', 'type' : "yn", 'metavar' : '<yes or no>',
+ ('reindex',
+ {'short': 'r', 'type' : 'yn', 'metavar' : '<yes or no>',
'default' : False,
'help': 're-indexes the database for full text search if this \
option is set to "y" or "yes" (may be long for large database).'}
),
- ("force",
- {'short': 'f', 'action' : "store_true",
+ ('force',
+ {'short': 'f', 'action' : 'store_true',
'default' : False,
'help': 'don\'t check instance is up to date.'}
),
@@ -702,7 +713,7 @@
def run(self, args):
from cubicweb.server.checkintegrity import check
- appid = pop_arg(args, 1, msg="No instance specified !")
+ appid = pop_arg(args, 1, msg='No instance specified !')
config = ServerConfiguration.config_for(appid)
config.repairing = self.config.force
repo, cnx = repo_cnx(config)
@@ -723,7 +734,7 @@
def run(self, args):
from cubicweb.server.checkintegrity import reindex_entities
- appid = pop_arg(args, 1, msg="No instance specified !")
+ appid = pop_arg(args, 1, msg='No instance specified !')
config = ServerConfiguration.config_for(appid)
repo, cnx = repo_cnx(config)
session = repo._get_session(cnx.sessionid, setpool=True)
@@ -744,7 +755,7 @@
arguments = '<instance>'
def run(self, args):
- appid = pop_arg(args, msg="No instance specified !")
+ appid = pop_arg(args, msg='No instance specified !')
config = ServerConfiguration.config_for(appid)
mih = config.migration_handler()
mih.cmd_synchronize_schema()
--- a/server/sources/__init__.py Fri Jul 24 17:03:32 2009 +0200
+++ b/server/sources/__init__.py Fri Jul 24 17:50:41 2009 +0200
@@ -7,6 +7,7 @@
"""
__docformat__ = "restructuredtext en"
+from os.path import join, splitext
from datetime import datetime, timedelta
from logging import getLogger
@@ -14,6 +15,7 @@
from cubicweb.server.sqlutils import SQL_PREFIX
+
class TimedCache(dict):
def __init__(self, ttlm, ttls=0):
# time to live in minutes
@@ -71,6 +73,42 @@
"""method called by the repository once ready to handle request"""
pass
+ def backup_file(self, backupfile=None, timestamp=None):
+ """return a unique file name for a source's dump
+
+        either backupfile or timestamp (used to generate a backup file name
+        if needed) should be specified.
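+
+        Generated file names look like <appid>-<timestamp>-<source uri>.dump,
+        stored under the instance's backup directory.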
+ """
+ if backupfile is None:
+ config = self.repo.config
+ return join(config.appdatahome, 'backup',
+ '%s-%s-%s.dump' % (config.appid, timestamp, self.uri))
+        # the backup file is the system database's backup file; add the source
+        # uri to it if it's not already there
+ base, ext = splitext(backupfile)
+ if not base.endswith('-%s' % self.uri):
+ return '%s-%s%s' % (base, self.uri, ext)
+ return backupfile
+
+ def backup(self, confirm, backupfile=None, timestamp=None,
+ askconfirm=False):
+ """method called to create a backup of source's data"""
+ pass
+
+ def restore(self, confirm, backupfile=None, timestamp=None, drop=True,
+ askconfirm=False):
+ """method called to restore a backup of source's data"""
+ pass
+
+ def close_pool_connections(self):
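+        """close this source's connection in each pool, e.g. before an
+        external dump or restore command runs"""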
+ for pool in self.repo.pools:
+ pool._cursors.pop(self.uri, None)
+ pool.source_cnxs[self.uri][1].close()
+
+ def open_pool_connections(self):
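+        """reopen a connection to this source in each pool"""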
+ for pool in self.repo.pools:
+            pool.source_cnxs[self.uri] = (self, self.get_connection())
+
def reset_caches(self):
"""method called during test to reset potential source caches"""
pass
--- a/server/sources/extlite.py Fri Jul 24 17:03:32 2009 +0200
+++ b/server/sources/extlite.py Fri Jul 24 17:50:41 2009 +0200
@@ -11,7 +11,8 @@
from os.path import join, exists
from cubicweb import server
-from cubicweb.server.sqlutils import SQL_PREFIX, sqlexec, SQLAdapterMixIn
+from cubicweb.server.sqlutils import (SQL_PREFIX, SQLAdapterMixIn, sqlexec,
+ sql_source_backup, sql_source_restore)
from cubicweb.server.sources import AbstractSource, native
from cubicweb.server.sources.rql2sql import SQLGenerator
@@ -85,6 +86,19 @@
AbstractSource.__init__(self, repo, appschema, source_config,
*args, **kwargs)
+ def backup(self, confirm, backupfile=None, timestamp=None, askconfirm=False):
+ """method called to create a backup of source's data"""
+ backupfile = self.backup_file(backupfile, timestamp)
+ sql_source_backup(self, self.sqladapter, confirm, backupfile,
+ askconfirm)
+
+ def restore(self, confirm, backupfile=None, timestamp=None, drop=True,
+ askconfirm=False):
+ """method called to restore a backup of source's data"""
+ backupfile = self.backup_file(backupfile, timestamp)
+ sql_source_restore(self, self.sqladapter, confirm, backupfile, drop,
+ askconfirm)
+
@property
def _sqlcnx(self):
# XXX: sqlite connections can only be used in the same thread, so
--- a/server/sources/native.py Fri Jul 24 17:03:32 2009 +0200
+++ b/server/sources/native.py Fri Jul 24 17:50:41 2009 +0200
@@ -25,7 +25,8 @@
from cubicweb import UnknownEid, AuthenticationError, Binary, server
from cubicweb.server.utils import crypt_password
-from cubicweb.server.sqlutils import SQL_PREFIX, SQLAdapterMixIn
+from cubicweb.server.sqlutils import (SQL_PREFIX, SQLAdapterMixIn,
+ sql_source_backup, sql_source_restore)
from cubicweb.server.rqlannotation import set_qdata
from cubicweb.server.sources import AbstractSource
from cubicweb.server.sources.rql2sql import SQLGenerator
@@ -199,6 +200,18 @@
pool.pool_reset()
self.repo._free_pool(pool)
+ def backup(self, confirm, backupfile=None, timestamp=None,
+ askconfirm=False):
+ """method called to create a backup of source's data"""
+ backupfile = self.backup_file(backupfile, timestamp)
+ sql_source_backup(self, self, confirm, backupfile, askconfirm)
+
+ def restore(self, confirm, backupfile=None, timestamp=None, drop=True,
+ askconfirm=False):
+ """method called to restore a backup of source's data"""
+ backupfile = self.backup_file(backupfile, timestamp)
+ sql_source_restore(self, self, confirm, backupfile, drop, askconfirm)
+
def init(self):
self.init_creating()
pool = self.repo._get_pool()
@@ -213,7 +226,7 @@
def map_attribute(self, etype, attr, cb):
self._rql_sqlgen.attr_map['%s.%s' % (etype, attr)] = cb
-
+
# ISource interface #######################################################
def compile_rql(self, rql):
--- a/server/sqlutils.py Fri Jul 24 17:03:32 2009 +0200
+++ b/server/sqlutils.py Fri Jul 24 17:50:41 2009 +0200
@@ -7,6 +7,8 @@
"""
__docformat__ = "restructuredtext en"
+import os
+from os.path import exists
from warnings import warn
from datetime import datetime, date, timedelta
@@ -21,6 +23,7 @@
from cubicweb import Binary, ConfigurationError
from cubicweb.utils import todate, todatetime
from cubicweb.common.uilib import remove_html_tags
+from cubicweb.toolsutils import restrict_perms_to_user
from cubicweb.server import SQL_CONNECT_HOOKS
from cubicweb.server.utils import crypt_password
@@ -116,6 +119,38 @@
skip_relations=skip_relations))
return '\n'.join(output)
+
+def sql_source_backup(source, sqladapter, confirm, backupfile,
+ askconfirm=False):
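+    """backup the given source's database to `backupfile` using `sqladapter`,
+    closing the source's pool connections while the dump command runs"""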
+ if exists(backupfile):
+ if not confirm('backup file %s exists, overwrite it?' % backupfile):
+ return
+    elif askconfirm and not confirm('backup %s database?'
+                                    % source.repo.config.appid):
+ return
+    # open connections to this source must be closed before backing up
+ source.close_pool_connections()
+ try:
+ sqladapter.backup_to_file(backupfile, confirm)
+ finally:
+ source.open_pool_connections()
+
+def sql_source_restore(source, sqladapter, confirm, backupfile, drop=True,
+ askconfirm=False):
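+    """restore the given source's database from `backupfile` using
+    `sqladapter`, closing the source's pool connections meanwhile"""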
+ if not exists(backupfile):
+ raise Exception("backup file %s doesn't exist" % backupfile)
+ app = source.repo.config.appid
+ if askconfirm and not confirm('restore %s %s database from %s ?'
+ % (app, source.uri, backupfile)):
+ return
+    # open connections to this source must be closed before restoring
+ source.close_pool_connections()
+ try:
+ sqladapter.restore_from_file(backupfile, confirm, drop=drop)
+ finally:
+ source.open_pool_connections()
+
+
try:
from mx.DateTime import DateTimeType, DateTimeDeltaType
except ImportError:
@@ -159,6 +194,46 @@
#self.dbapi_module.type_code_test(cnx.cursor())
return cnx
+ def backup_to_file(self, backupfile, confirm):
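+        """run the database helper's backup command; on error, ask through
+        `confirm` whether to retry, continue or abort"""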
+ cmd = self.dbhelper.backup_command(self.dbname, self.dbhost,
+ self.dbuser, backupfile,
+ keepownership=False)
+ while True:
+ print cmd
+ if os.system(cmd):
+                print 'error while backing up the database'
+ answer = confirm('continue anyway?',
+ shell=False, abort=False, retry=True)
+ if not answer:
+ raise SystemExit(1)
+ if answer == 1: # 1: continue, 2: retry
+ break
+ else:
+ print 'database backup:', backupfile
+ restrict_perms_to_user(backupfile, self.info)
+ break
+
+ def restore_from_file(self, backupfile, confirm, drop=True):
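+        """run the database helper's restore commands; on error, ask through
+        `confirm` whether to retry, continue or abort"""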
+ for cmd in self.dbhelper.restore_commands(self.dbname, self.dbhost,
+ self.dbuser, backupfile,
+ self.encoding,
+ keepownership=False,
+ drop=drop):
+ while True:
+ print cmd
+ if os.system(cmd):
+                    print 'error while restoring the database'
+ answer = confirm('continue anyway?',
+ shell=False, abort=False, retry=True)
+ if not answer:
+ raise SystemExit(1)
+ if answer == 1: # 1: continue, 2: retry
+ break
+ else:
+ break
+ print 'database restored'
+
def merge_args(self, args, query_args):
if args is not None:
args = dict(args)