Use repoapi instead of dbapi for cwctl shell, upgrade and db-init
Hopefully nobody uses dbapi-specific APIs in their migration scripts. I
guess we'll find out.
--- a/cwctl.py Tue Jan 28 16:07:06 2014 +0100
+++ b/cwctl.py Tue Feb 11 17:29:58 2014 +0100
@@ -781,7 +781,8 @@
if self.config.fs_only or toupgrade:
for cube, fromversion, toversion in toupgrade:
print '-> migration needed from %s to %s for %s' % (fromversion, toversion, cube)
- mih.migrate(vcconf, reversed(toupgrade), self.config)
+ with mih.cnx:
+ mih.migrate(vcconf, reversed(toupgrade), self.config)
else:
print '-> no data migration needed for instance %s.' % appid
# rewrite main configuration file
@@ -912,13 +913,14 @@
def _handle_networked(self, appuri):
""" returns migration context handler & shutdown function """
from cubicweb import AuthenticationError
- from cubicweb.dbapi import connect
+ from cubicweb.repoapi import connect, get_repository
from cubicweb.server.utils import manager_userpasswd
from cubicweb.server.migractions import ServerMigrationHelper
while True:
try:
login, pwd = manager_userpasswd(msg=None)
- cnx = connect(appuri, login=login, password=pwd, mulcnx=False)
+ repo = get_repository(appuri)
+ cnx = connect(repo, login=login, password=pwd)
except AuthenticationError as ex:
print ex
except (KeyboardInterrupt, EOFError):
@@ -948,15 +950,16 @@
else:
mih, shutdown_callback = self._handle_networked(appuri)
try:
- if args:
- # use cmdline parser to access left/right attributes only
- # remember that usage requires instance appid as first argument
- scripts, args = self.cmdline_parser.largs[1:], self.cmdline_parser.rargs
- for script in scripts:
- mih.cmd_process_script(script, scriptargs=args)
- mih.commit()
- else:
- mih.interactive_shell()
+ with mih.cnx:
+ if args:
+ # use cmdline parser to access left/right attributes only
+ # remember that usage requires instance appid as first argument
+ scripts, args = self.cmdline_parser.largs[1:], self.cmdline_parser.rargs
+ for script in scripts:
+ mih.cmd_process_script(script, scriptargs=args)
+ mih.commit()
+ else:
+ mih.interactive_shell()
finally:
shutdown_callback()
--- a/server/__init__.py Tue Jan 28 16:07:06 2014 +0100
+++ b/server/__init__.py Tue Feb 11 17:29:58 2014 +0100
@@ -202,7 +202,7 @@
with the minimal set of entities (ie at least the schema, base groups and
a initial user)
"""
- from cubicweb.dbapi import in_memory_repo_cnx
+ from cubicweb.repoapi import get_repository, connect
from cubicweb.server.repository import Repository
from cubicweb.server.utils import manager_userpasswd
from cubicweb.server.sqlutils import sqlexec, sqlschema, sql_drop_all_user_tables
@@ -281,21 +281,21 @@
repo.shutdown()
# reloging using the admin user
config._cubes = None # avoid assertion error
- repo, cnx = in_memory_repo_cnx(config, login, password=pwd)
- repo.system_source.eid = ssource.eid # redo this manually
- handler = config.migration_handler(schema, interactive=False,
- cnx=cnx, repo=repo)
- # install additional driver specific sql files
- handler.cmd_install_custom_sql_scripts()
- for cube in reversed(config.cubes()):
- handler.cmd_install_custom_sql_scripts(cube)
- # serialize the schema
- initialize_schema(config, schema, handler)
- # yoo !
- cnx.commit()
- repo.system_source.init_creating()
- cnx.commit()
- cnx.close()
+ repo = get_repository(config=config)
+ with connect(repo, login, password=pwd) as cnx:
+ repo.system_source.eid = ssource.eid # redo this manually
+ handler = config.migration_handler(schema, interactive=False,
+ cnx=cnx, repo=repo)
+ # install additional driver specific sql files
+ handler.cmd_install_custom_sql_scripts()
+ for cube in reversed(config.cubes()):
+ handler.cmd_install_custom_sql_scripts(cube)
+ # serialize the schema
+ initialize_schema(config, schema, handler)
+ # commit the serialized schema before source initialization
+ cnx.commit()
+ repo.system_source.init_creating()
+ cnx.commit()
repo.shutdown()
# restore initial configuration
config.creating = False
@@ -308,13 +308,13 @@
def initialize_schema(config, schema, mhandler, event='create'):
from cubicweb.server.schemaserial import serialize_schema
- session = mhandler.session
+ cnx = mhandler.cnx
cubes = config.cubes()
# deactivate every hooks but those responsible to set metadata
# so, NO INTEGRITY CHECKS are done, to have quicker db creation.
# Active integrity is kept else we may pb such as two default
# workflows for one entity type.
- with session.deny_all_hooks_but('metadata', 'activeintegrity'):
+ with cnx._cnx.deny_all_hooks_but('metadata', 'activeintegrity'):
# execute cubicweb's pre<event> script
mhandler.cmd_exec_event_script('pre%s' % event)
# execute cubes pre<event> script if any
@@ -323,8 +323,7 @@
# execute instance's pre<event> script (useful in tests)
mhandler.cmd_exec_event_script('pre%s' % event, apphome=True)
# enter instance'schema into the database
- session.set_cnxset()
- serialize_schema(session, schema)
+ serialize_schema(cnx, schema)
# execute cubicweb's post<event> script
mhandler.cmd_exec_event_script('post%s' % event)
# execute cubes'post<event> script if any
--- a/server/migractions.py Tue Jan 28 16:07:06 2014 +0100
+++ b/server/migractions.py Tue Feb 11 17:29:58 2014 +0100
@@ -53,7 +53,7 @@
PURE_VIRTUAL_RTYPES,
CubicWebRelationSchema, order_eschemas)
from cubicweb.cwvreg import CW_EVENT_MANAGER
-from cubicweb.dbapi import get_repository, _repo_connect
+from cubicweb import repoapi
from cubicweb.migration import MigrationHelper, yes
from cubicweb.server import hook, schemaserial as ss
from cubicweb.server.utils import manager_userpasswd
@@ -125,7 +125,7 @@
@cached
def repo_connect(self):
- self.repo = get_repository(config=self.config)
+ self.repo = repoapi.get_repository(config=self.config)
return self.repo
def cube_upgraded(self, cube, version):
@@ -268,7 +268,7 @@
login, pwd = manager_userpasswd()
while True:
try:
- self._cnx = _repo_connect(self.repo, login, password=pwd)
+ self._cnx = repoapi.connect(self.repo, login, password=pwd)
if not 'managers' in self._cnx.user(self.session).groups:
print 'migration need an account in the managers group'
else:
@@ -327,7 +327,7 @@
'schema': self.repo.get_schema(),
'cnx': self.cnx,
'fsschema': self.fs_schema,
- 'session' : self.session,
+ 'session' : self.cnx._cnx,
'repo' : self.repo,
})
return context
@@ -335,12 +335,12 @@
@cached
def group_mapping(self):
"""cached group mapping"""
- return ss.group_mapping(self._cw)
+ return ss.group_mapping(self.cnx)
@cached
def cstrtype_mapping(self):
"""cached constraint types mapping"""
- return ss.cstrtype_mapping(self._cw)
+ return ss.cstrtype_mapping(self.cnx)
def cmd_exec_event_script(self, event, cube=None, funcname=None,
*args, **kwargs):
@@ -809,7 +809,7 @@
groupmap = self.group_mapping()
cstrtypemap = self.cstrtype_mapping()
# register the entity into CWEType
- execute = self._cw.execute
+ execute = self.cnx.execute
ss.execschemarql(execute, eschema, ss.eschema2rql(eschema, groupmap))
# add specializes relation if needed
specialized = eschema.specializes()
@@ -1038,7 +1038,7 @@
"""
reposchema = self.repo.schema
rschema = self.fs_schema.rschema(rtype)
- execute = self._cw.execute
+ execute = self.cnx.execute
if rtype in reposchema:
print 'warning: relation type %s is already known, skip addition' % (
rtype)
@@ -1110,7 +1110,7 @@
subjtype, rtype, objtype)
return
rdef = self._get_rdef(rschema, subjtype, objtype)
- ss.execschemarql(self._cw.execute, rdef,
+ ss.execschemarql(self.cnx.execute, rdef,
ss.rdef2rql(rdef, self.cstrtype_mapping(),
self.group_mapping()))
if commit:
@@ -1337,14 +1337,6 @@
# other data migration commands ###########################################
- @property
- def _cw(self):
- session = self.session
- if session is not None:
- session.set_cnxset()
- return session
- return self.cnx.request()
-
def cmd_storage_changed(self, etype, attribute):
"""migrate entities to a custom storage. The new storage is expected to
be set, it will be temporarily removed for the migration.
@@ -1368,14 +1360,14 @@
def cmd_create_entity(self, etype, commit=False, **kwargs):
"""add a new entity of the given type"""
- entity = self._cw.create_entity(etype, **kwargs)
+ entity = self.cnx.create_entity(etype, **kwargs)
if commit:
self.commit()
return entity
def cmd_find_entities(self, etype, **kwargs):
"""find entities of the given type and attribute values"""
- return self._cw.find_entities(etype, **kwargs)
+ return self.cnx.find_entities(etype, **kwargs)
def cmd_find_one_entity(self, etype, **kwargs):
"""find one entity of the given type and attribute values.
@@ -1383,7 +1375,7 @@
raise :exc:`cubicweb.req.FindEntityError` if can not return one and only
one entity.
"""
- return self._cw.find_one_entity(etype, **kwargs)
+ return self.cnx.find_one_entity(etype, **kwargs)
def cmd_update_etype_fti_weight(self, etype, weight):
if self.repo.system_source.dbdriver == 'postgres':
@@ -1442,7 +1434,7 @@
"""
if not ask_confirm or self.confirm('Execute sql: %s ?' % sql):
try:
- cu = self.session.system_sql(sql, args)
+ cu = self.cnx._cnx.system_sql(sql, args)
except Exception:
ex = sys.exc_info()[1]
if self.confirm('Error: %s\nabort?' % ex, pdb=True):
@@ -1460,7 +1452,7 @@
if not isinstance(rql, (tuple, list)):
rql = ( (rql, kwargs), )
res = None
- execute = self._cw.execute
+ execute = self.cnx.execute
for rql, kwargs in rql:
if kwargs:
msg = '%s (%s)' % (rql, kwargs)