1 # copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved. |
|
2 # contact http://www.logilab.fr/ -- mailto:contact@logilab.fr |
|
3 # |
|
4 # This file is part of CubicWeb. |
|
5 # |
|
6 # CubicWeb is free software: you can redistribute it and/or modify it under the |
|
7 # terms of the GNU Lesser General Public License as published by the Free |
|
8 # Software Foundation, either version 2.1 of the License, or (at your option) |
|
9 # any later version. |
|
10 # |
|
11 # CubicWeb is distributed in the hope that it will be useful, but WITHOUT |
|
12 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS |
|
13 # FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more |
|
14 # details. |
|
15 # |
|
16 # You should have received a copy of the GNU Lesser General Public License along |
|
17 # with CubicWeb. If not, see <http://www.gnu.org/licenses/>. |
|
18 """a class implementing basic actions used in migration scripts. |
|
19 |
|
20 The following schema actions are supported for now: |
|
21 * add/drop/rename attribute |
|
22 * add/drop entity/relation type |
|
23 * rename entity type |
|
24 |
|
25 The following data actions are supported for now: |
|
26 * add an entity |
|
27 * execute raw RQL queries |
|
28 """ |
|
29 from __future__ import print_function |
|
30 |
|
31 __docformat__ = "restructuredtext en" |
|
32 |
|
33 import sys |
|
34 import os |
|
35 import tarfile |
|
36 import tempfile |
|
37 import shutil |
|
38 import os.path as osp |
|
39 from datetime import datetime |
|
40 from glob import glob |
|
41 from copy import copy |
|
42 from warnings import warn |
|
43 from contextlib import contextmanager |
|
44 |
|
45 from six import PY2, text_type |
|
46 |
|
47 from logilab.common.deprecation import deprecated |
|
48 from logilab.common.decorators import cached, clear_cache |
|
49 |
|
50 from yams.buildobjs import EntityType |
|
51 from yams.constraints import SizeConstraint |
|
52 from yams.schema import RelationDefinitionSchema |
|
53 |
|
54 from cubicweb import CW_SOFTWARE_ROOT, AuthenticationError, ExecutionError |
|
55 from cubicweb.predicates import is_instance |
|
56 from cubicweb.schema import (ETYPE_NAME_MAP, META_RTYPES, VIRTUAL_RTYPES, |
|
57 PURE_VIRTUAL_RTYPES, |
|
58 CubicWebRelationSchema, order_eschemas) |
|
59 from cubicweb.cwvreg import CW_EVENT_MANAGER |
|
60 from cubicweb import repoapi |
|
61 from cubicweb.migration import MigrationHelper, yes |
|
62 from cubicweb.server import hook, schemaserial as ss |
|
63 from cubicweb.server.schema2sql import eschema2sql, rschema2sql, unique_index_name, sql_type |
|
64 from cubicweb.server.utils import manager_userpasswd |
|
65 from cubicweb.server.sqlutils import sqlexec, SQL_PREFIX |
|
66 |
|
67 |
|
class ClearGroupMap(hook.Hook):
    """hook clearing the migration helper's `group_mapping` cache and its
    `_synchronized` set whenever a CWGroup entity is added or updated
    """
    __regid__ = 'cw.migration.clear_group_mapping'
    __select__ = hook.Hook.__select__ & is_instance('CWGroup')
    events = ('after_add_entity', 'after_update_entity',)

    def __call__(self):
        # `mih` is set on the class by ServerMigrationHelper.__init__ before
        # this hook is registered
        clear_cache(self.mih, 'group_mapping')
        self.mih._synchronized.clear()

    @classmethod
    def mih_register(cls, repo):
        # may be already registered in tests (e.g. unittest_migractions at
        # least)
        if not cls.__regid__ in repo.vreg['after_add_entity_hooks']:
            repo.vreg.register(ClearGroupMap)
|
82 |
|
83 |
|
84 class ServerMigrationHelper(MigrationHelper): |
|
85 """specific migration helper for server side migration scripts, |
|
86 providing actions related to schema/data migration |
|
87 """ |
|
88 |
|
    def __init__(self, config, schema, interactive=True,
                 repo=None, cnx=None, verbosity=1, connect=True):
        """initialize the server-side migration helper.

        :param config: instance configuration (may be None for a shell to a
          remote instance)
        :param schema: filesystem schema; loaded from config when falsy
        :param interactive: ask for confirmations when True; when False both
          `cnx` and `repo` are mandatory
        :param repo: already opened repository, required along with `cnx`
        :param cnx: already opened connection; when None and `connect` is
          True, a repository and connection are opened here
        :param verbosity: verbosity level (>= 2 means confirm each query)
        :param connect: whether to open a repository/connection when no `cnx`
          is given
        """
        MigrationHelper.__init__(self, config, interactive, verbosity)
        if not interactive:
            assert cnx
            assert repo
        if cnx is not None:
            assert repo
            self.cnx = cnx
            self.repo = repo
            self.session = cnx.session
        elif connect:
            self.repo = config.repository()
            self.set_cnx()
        else:
            self.session = None
        # no config on shell to a remote instance
        if config is not None and (cnx or connect):
            repo = self.repo
            # register a hook to clear our group_mapping cache and the
            # self._synchronized set when some group is added or updated
            ClearGroupMap.mih = self
            ClearGroupMap.mih_register(repo)
            CW_EVENT_MANAGER.bind('after-registry-reload',
                                  ClearGroupMap.mih_register, repo)
            # notify we're starting maintenance (called instead of server_start
            # which is called on regular start)
            repo.hm.call_hooks('server_maintenance', repo=repo)
        if not schema and not config.quick_start:
            # pre-3.14 instances may need the 'localperms' cube expanded into
            # the cube list for the stored schema to load properly
            insert_lperms = self.repo.get_versions()['cubicweb'] < (3, 14, 0) and 'localperms' in config.available_cubes()
            if insert_lperms:
                cubes = config._cubes
                config._cubes += ('localperms',)
            try:
                schema = config.load_schema(expand_cubes=True)
            finally:
                # restore the original cube list whatever happened
                if insert_lperms:
                    config._cubes = cubes
        self.fs_schema = schema
        # schema objects already synchronized, see _synchronize_* methods
        self._synchronized = set()
|
129 |
|
130 # overriden from base MigrationHelper ###################################### |
|
131 |
|
132 def set_cnx(self): |
|
133 try: |
|
134 login = self.repo.config.default_admin_config['login'] |
|
135 pwd = self.repo.config.default_admin_config['password'] |
|
136 except KeyError: |
|
137 login, pwd = manager_userpasswd() |
|
138 while True: |
|
139 try: |
|
140 self.cnx = repoapi.connect(self.repo, login, password=pwd) |
|
141 if not 'managers' in self.cnx.user.groups: |
|
142 print('migration need an account in the managers group') |
|
143 else: |
|
144 break |
|
145 except AuthenticationError: |
|
146 print('wrong user/password') |
|
147 except (KeyboardInterrupt, EOFError): |
|
148 print('aborting...') |
|
149 sys.exit(0) |
|
150 try: |
|
151 login, pwd = manager_userpasswd() |
|
152 except (KeyboardInterrupt, EOFError): |
|
153 print('aborting...') |
|
154 sys.exit(0) |
|
155 self.session = self.repo._get_session(self.cnx.sessionid) |
|
156 |
|
157 def cube_upgraded(self, cube, version): |
|
158 self.cmd_set_property('system.version.%s' % cube.lower(), |
|
159 text_type(version)) |
|
160 self.commit() |
|
161 |
|
162 def shutdown(self): |
|
163 if self.repo is not None: |
|
164 self.repo.shutdown() |
|
165 |
|
166 def migrate(self, vcconf, toupgrade, options): |
|
167 if not options.fs_only: |
|
168 if options.backup_db is None: |
|
169 self.backup_database() |
|
170 elif options.backup_db: |
|
171 self.backup_database(askconfirm=False) |
|
172 # disable notification during migration |
|
173 with self.cnx.allow_all_hooks_but('notification'): |
|
174 super(ServerMigrationHelper, self).migrate(vcconf, toupgrade, options) |
|
175 |
|
176 def cmd_process_script(self, migrscript, funcname=None, *args, **kwargs): |
|
177 try: |
|
178 return super(ServerMigrationHelper, self).cmd_process_script( |
|
179 migrscript, funcname, *args, **kwargs) |
|
180 except ExecutionError as err: |
|
181 sys.stderr.write("-> %s\n" % err) |
|
182 except BaseException: |
|
183 self.rollback() |
|
184 raise |
|
185 |
|
186 # Adjust docstring |
|
187 cmd_process_script.__doc__ = MigrationHelper.cmd_process_script.__doc__ |
|
188 |
|
189 # server specific migration methods ######################################## |
|
190 |
|
191 def backup_database(self, backupfile=None, askconfirm=True, format='native'): |
|
192 config = self.config |
|
193 repo = self.repo |
|
194 # paths |
|
195 timestamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S') |
|
196 instbkdir = osp.join(config.appdatahome, 'backup') |
|
197 if not osp.exists(instbkdir): |
|
198 os.makedirs(instbkdir) |
|
199 backupfile = backupfile or osp.join(instbkdir, '%s-%s.tar.gz' |
|
200 % (config.appid, timestamp)) |
|
201 # check backup has to be done |
|
202 if osp.exists(backupfile) and not \ |
|
203 self.confirm('Backup file %s exists, overwrite it?' % backupfile): |
|
204 print('-> no backup done.') |
|
205 return |
|
206 elif askconfirm and not self.confirm('Backup %s database?' % config.appid): |
|
207 print('-> no backup done.') |
|
208 return |
|
209 open(backupfile,'w').close() # kinda lock |
|
210 os.chmod(backupfile, 0o600) |
|
211 # backup |
|
212 source = repo.system_source |
|
213 tmpdir = tempfile.mkdtemp() |
|
214 try: |
|
215 failed = False |
|
216 try: |
|
217 source.backup(osp.join(tmpdir, source.uri), self.confirm, format=format) |
|
218 except Exception as ex: |
|
219 print('-> error trying to backup %s [%s]' % (source.uri, ex)) |
|
220 if not self.confirm('Continue anyway?', default='n'): |
|
221 raise SystemExit(1) |
|
222 else: |
|
223 failed = True |
|
224 with open(osp.join(tmpdir, 'format.txt'), 'w') as format_file: |
|
225 format_file.write('%s\n' % format) |
|
226 with open(osp.join(tmpdir, 'versions.txt'), 'w') as version_file: |
|
227 versions = repo.get_versions() |
|
228 for cube, version in versions.items(): |
|
229 version_file.write('%s %s\n' % (cube, version)) |
|
230 if not failed: |
|
231 bkup = tarfile.open(backupfile, 'w|gz') |
|
232 for filename in os.listdir(tmpdir): |
|
233 bkup.add(osp.join(tmpdir, filename), filename) |
|
234 bkup.close() |
|
235 # call hooks |
|
236 repo.hm.call_hooks('server_backup', repo=repo, timestamp=timestamp) |
|
237 # done |
|
238 print('-> backup file', backupfile) |
|
239 finally: |
|
240 shutil.rmtree(tmpdir) |
|
241 |
|
    def restore_database(self, backupfile, drop=True, askconfirm=True, format='native'):
        """restore the instance database from a backup archive.

        :param backupfile: path to the archive created by `backup_database`
          (or a plain old-style dump, handled as a fallback)
        :param drop: passed to the source's restore method
        :param askconfirm: ask for confirmation before restoring
        :param format: expected backup format, overridden by the archive's
          own `format.txt` when present and valid
        """
        # check
        if not osp.exists(backupfile):
            raise ExecutionError("Backup file %s doesn't exist" % backupfile)
        if askconfirm and not self.confirm('Restore %s database from %s ?'
                                           % (self.config.appid, backupfile)):
            return
        # unpack backup
        tmpdir = tempfile.mkdtemp()
        try:
            bkup = tarfile.open(backupfile, 'r|gz')
        except tarfile.ReadError:
            # assume restoring old backup
            shutil.copy(backupfile, osp.join(tmpdir, 'system'))
        else:
            # NOTE(review): this only rejects member names starting with '/'
            # or '.'; a name containing '..' later in the path would slip
            # through — consider hardening this traversal check
            for name in bkup.getnames():
                if name[0] in '/.':
                    raise ExecutionError('Security check failed, path starts with "/" or "."')
            bkup.close() # XXX seek error if not close+open !?!
            bkup = tarfile.open(backupfile, 'r|gz')
            bkup.extractall(path=tmpdir)
            bkup.close()
        # honor the format recorded in the archive when it is a known one
        if osp.isfile(osp.join(tmpdir, 'format.txt')):
            with open(osp.join(tmpdir, 'format.txt')) as format_file:
                written_format = format_file.readline().strip()
            if written_format in ('portable', 'native'):
                format = written_format
        # open the repository without its connections pool, restore, then
        # initialize the pool on the restored database
        self.config.init_cnxset_pool = False
        repo = self.repo = self.config.repository()
        source = repo.system_source
        try:
            source.restore(osp.join(tmpdir, source.uri), self.confirm, drop, format)
        except Exception as exc:
            print('-> error trying to restore %s [%s]' % (source.uri, exc))
            if not self.confirm('Continue anyway?', default='n'):
                raise SystemExit(1)
        shutil.rmtree(tmpdir)
        # call hooks
        repo.init_cnxset_pool()
        repo.hm.call_hooks('server_restore', repo=repo, timestamp=backupfile)
        print('-> database restored.')
|
283 |
|
    def commit(self):
        """commit the current transaction on the underlying connection"""
        self.cnx.commit()
|
286 |
|
    def rollback(self):
        """rollback the current transaction on the underlying connection"""
        self.cnx.rollback()
|
289 |
|
290 def rqlexecall(self, rqliter, ask_confirm=False): |
|
291 for rql, kwargs in rqliter: |
|
292 self.rqlexec(rql, kwargs, ask_confirm=ask_confirm) |
|
293 |
|
294 @cached |
|
295 def _create_context(self): |
|
296 """return a dictionary to use as migration script execution context""" |
|
297 context = super(ServerMigrationHelper, self)._create_context() |
|
298 context.update({'commit': self.checkpoint, |
|
299 'rollback': self.rollback, |
|
300 'sql': self.sqlexec, |
|
301 'rql': self.rqlexec, |
|
302 'rqliter': self.rqliter, |
|
303 'schema': self.repo.get_schema(), |
|
304 'cnx': self.cnx, |
|
305 'fsschema': self.fs_schema, |
|
306 'session' : self.cnx, |
|
307 'repo' : self.repo, |
|
308 }) |
|
309 return context |
|
310 |
|
    @cached
    def group_mapping(self):
        """group mapping, cached on the helper (see @cached); invalidated by
        the ClearGroupMap hook when groups change
        """
        return ss.group_mapping(self.cnx)
|
315 |
|
    def cstrtype_mapping(self):
        """constraint types mapping (note: unlike `group_mapping` this is
        NOT cached — recomputed on each call)
        """
        return ss.cstrtype_mapping(self.cnx)
|
319 |
|
    def cmd_exec_event_script(self, event, cube=None, funcname=None,
                              *args, **kwargs):
        """execute a cube event scripts `migration/<event>.py` where event
        is one of 'precreate', 'postcreate', 'preremove' and 'postremove'.

        The script is looked up in the given cube's migration directory,
        in the instance home when `apphome=True` is passed, or in the
        configuration's migration scripts directory otherwise.
        """
        assert event in ('precreate', 'postcreate', 'preremove', 'postremove')
        if cube:
            cubepath = self.config.cube_dir(cube)
            apc = osp.join(cubepath, 'migration', '%s.py' % event)
        elif kwargs.pop('apphome', False):
            apc = osp.join(self.config.apphome, 'migration', '%s.py' % event)
        else:
            apc = osp.join(self.config.migration_scripts_dir(), '%s.py' % event)
        if osp.exists(apc):
            if self.config.free_wheel:
                self.cmd_deactivate_verification_hooks()
            self.info('executing %s', apc)
            # temporarily force positive answers so event scripts run
            # unattended; originals are restored in the finally clause
            confirm = self.confirm
            execscript_confirm = self.execscript_confirm
            self.confirm = yes
            self.execscript_confirm = yes
            try:
                # postcreate scripts run with all hooks enabled
                if event == 'postcreate':
                    with self.cnx.allow_all_hooks_but():
                        return self.cmd_process_script(apc, funcname, *args, **kwargs)
                return self.cmd_process_script(apc, funcname, *args, **kwargs)
            finally:
                self.confirm = confirm
                self.execscript_confirm = execscript_confirm
                if self.config.free_wheel:
                    self.cmd_reactivate_verification_hooks()
|
351 |
|
352 def cmd_install_custom_sql_scripts(self, cube=None): |
|
353 """install a cube custom sql scripts `schema/*.<driver>.sql` where |
|
354 <driver> depends on the instance main database backend (eg 'postgres', |
|
355 'mysql'...) |
|
356 """ |
|
357 driver = self.repo.system_source.dbdriver |
|
358 if cube is None: |
|
359 directory = osp.join(CW_SOFTWARE_ROOT, 'schemas') |
|
360 else: |
|
361 directory = osp.join(self.config.cube_dir(cube), 'schema') |
|
362 sql_scripts = glob(osp.join(directory, '*.%s.sql' % driver)) |
|
363 for fpath in sql_scripts: |
|
364 print('-> installing', fpath) |
|
365 failed = sqlexec(open(fpath).read(), self.cnx.system_sql, False, |
|
366 delimiter=';;') |
|
367 if failed: |
|
368 print('-> ERROR, skipping', fpath) |
|
369 |
|
370 # schema synchronization internals ######################################## |
|
371 |
|
    def _synchronize_permissions(self, erschema, teid):
        """permission synchronization for an entity or relation type

        :param erschema: the filesystem entity/relation/rdef schema holding
          the target permissions
        :param teid: eid of the corresponding persistent schema entity
        """
        assert teid, erschema
        if 'update' in erschema.ACTIONS or erschema.final:
            # entity type
            exprtype = u'ERQLExpression'
        else:
            # relation type
            exprtype = u'RRQLExpression'
        gm = self.group_mapping()
        # only ask for confirmation at verbosity >= 2
        confirm = self.verbosity >= 2
        # * remove possibly deprecated permission (eg in the persistent schema
        #   but not in the new schema)
        # * synchronize existing expressions
        # * add new groups/expressions
        for action in erschema.ACTIONS:
            perm = '%s_permission' % action
            # handle groups
            newgroups = list(erschema.get_groups(action))
            for geid, gname in self.rqlexec('Any G, GN WHERE T %s G, G name GN, '
                                            'T eid %%(x)s' % perm, {'x': teid},
                                            ask_confirm=False):
                if not gname in newgroups:
                    # group present in database but not in the new schema
                    if not confirm or self.confirm('Remove %s permission of %s to %s?'
                                                   % (action, erschema, gname)):
                        self.rqlexec('DELETE T %s G WHERE G eid %%(x)s, T eid %s'
                                     % (perm, teid),
                                     {'x': geid}, ask_confirm=False)
                else:
                    # already in database, no need to add it below
                    newgroups.remove(gname)
            for gname in newgroups:
                if not confirm or self.confirm('Grant %s permission of %s to %s?'
                                               % (action, erschema, gname)):
                    try:
                        self.rqlexec('SET T %s G WHERE G eid %%(x)s, T eid %s'
                                     % (perm, teid),
                                     {'x': gm[gname]}, ask_confirm=False)
                    except KeyError:
                        self.error('can grant %s perm to unexistant group %s',
                                   action, gname)
            # handle rql expressions
            newexprs = dict((expr.expression, expr) for expr in erschema.get_rqlexprs(action))
            for expreid, expression in self.rqlexec('Any E, EX WHERE T %s E, E expression EX, '
                                                    'T eid %s' % (perm, teid),
                                                    ask_confirm=False):
                if not expression in newexprs:
                    if not confirm or self.confirm('Remove %s expression for %s permission of %s?'
                                                   % (expression, action, erschema)):
                        # deleting the relation will delete the expression entity
                        self.rqlexec('DELETE T %s E WHERE E eid %%(x)s, T eid %s'
                                     % (perm, teid),
                                     {'x': expreid}, ask_confirm=False)
                else:
                    # already in database, no need to add it below
                    newexprs.pop(expression)
            for expression in newexprs.values():
                expr = expression.expression
                if not confirm or self.confirm('Add %s expression for %s permission of %s?'
                                               % (expr, action, erschema)):
                    self.rqlexec('INSERT RQLExpression X: X exprtype %%(exprtype)s, '
                                 'X expression %%(expr)s, X mainvars %%(vars)s, T %s X '
                                 'WHERE T eid %%(x)s' % perm,
                                 {'expr': expr, 'exprtype': exprtype,
                                  'vars': u','.join(sorted(expression.mainvars)),
                                  'x': teid},
                                 ask_confirm=False)
|
437 |
|
438 def _synchronize_rschema(self, rtype, syncrdefs=True, |
|
439 syncperms=True, syncprops=True): |
|
440 """synchronize properties of the persistent relation schema against its |
|
441 current definition: |
|
442 |
|
443 * description |
|
444 * symmetric, meta |
|
445 * inlined |
|
446 * relation definitions if `syncrdefs` |
|
447 * permissions if `syncperms` |
|
448 |
|
449 physical schema changes should be handled by repository's schema hooks |
|
450 """ |
|
451 rtype = str(rtype) |
|
452 if rtype in self._synchronized: |
|
453 return |
|
454 if syncrdefs and syncperms and syncprops: |
|
455 self._synchronized.add(rtype) |
|
456 rschema = self.fs_schema.rschema(rtype) |
|
457 reporschema = self.repo.schema.rschema(rtype) |
|
458 if syncprops: |
|
459 assert reporschema.eid, reporschema |
|
460 self.rqlexecall(ss.updaterschema2rql(rschema, reporschema.eid), |
|
461 ask_confirm=self.verbosity>=2) |
|
462 if rschema.rule: |
|
463 if syncperms: |
|
464 self._synchronize_permissions(rschema, reporschema.eid) |
|
465 elif syncrdefs: |
|
466 for subj, obj in rschema.rdefs: |
|
467 if (subj, obj) not in reporschema.rdefs: |
|
468 continue |
|
469 if rschema in VIRTUAL_RTYPES: |
|
470 continue |
|
471 self._synchronize_rdef_schema(subj, rschema, obj, |
|
472 syncprops=syncprops, |
|
473 syncperms=syncperms) |
|
474 |
|
    def _synchronize_eschema(self, etype, syncrdefs=True,
                             syncperms=True, syncprops=True):
        """synchronize properties of the persistent entity schema against
        its current definition:

        * description
        * internationalizable, fulltextindexed, indexed, meta
        * relations from/to this entity
        * __unique_together__
        * permissions if `syncperms`
        """
        etype = str(etype)
        if etype in self._synchronized:
            return
        # only mark as done when a full synchronization was requested
        if syncrdefs and syncperms and syncprops:
            self._synchronized.add(etype)
        repoeschema = self.repo.schema.eschema(etype)
        try:
            eschema = self.fs_schema.eschema(etype)
        except KeyError:
            return # XXX somewhat unexpected, no?...
        if syncprops:
            # synchronize the 'specializes' relation with the fs schema
            repospschema = repoeschema.specializes()
            espschema = eschema.specializes()
            if repospschema and not espschema:
                self.rqlexec('DELETE X specializes Y WHERE X is CWEType, X name %(x)s',
                             {'x': str(repoeschema)}, ask_confirm=False)
            elif not repospschema and espschema:
                self.rqlexec('SET X specializes Y WHERE X is CWEType, X name %(x)s, '
                             'Y is CWEType, Y name %(y)s',
                             {'x': str(repoeschema), 'y': str(espschema)},
                             ask_confirm=False)
            self.rqlexecall(ss.updateeschema2rql(eschema, repoeschema.eid),
                            ask_confirm=self.verbosity >= 2)
        if syncperms:
            self._synchronize_permissions(eschema, repoeschema.eid)
        if syncrdefs:
            for rschema, targettypes, role in eschema.relation_definitions(True):
                if rschema in VIRTUAL_RTYPES:
                    continue
                # skip relations not known on this entity in the repo schema
                if role == 'subject':
                    if not rschema in repoeschema.subject_relations():
                        continue
                    subjtypes, objtypes = [etype], targettypes
                else: # role == 'object'
                    if not rschema in repoeschema.object_relations():
                        continue
                    subjtypes, objtypes = targettypes, [etype]
                self._synchronize_rschema(rschema, syncrdefs=False,
                                          syncprops=syncprops, syncperms=syncperms)
                # rdef for computed rtype are infered hence should not be
                # synchronized
                if rschema.rule:
                    continue
                reporschema = self.repo.schema.rschema(rschema)
                for subj in subjtypes:
                    for obj in objtypes:
                        if (subj, obj) not in reporschema.rdefs:
                            continue
                        self._synchronize_rdef_schema(subj, rschema, obj,
                                                      syncprops=syncprops, syncperms=syncperms)
        if syncprops: # need to process __unique_together__ after rdefs were processed
            # mappings from constraint name to columns
            # filesystem (fs) and repository (repo) wise
            fs = {}
            repo = {}
            for cols in eschema._unique_together or ():
                fs[unique_index_name(repoeschema, cols)] = sorted(cols)
            schemaentity = self.cnx.entity_from_eid(repoeschema.eid)
            for entity in schemaentity.related('constraint_of', 'object',
                                               targettypes=('CWUniqueTogetherConstraint',)).entities():
                repo[entity.name] = sorted(rel.name for rel in entity.relations)
            added = set(fs) - set(repo)
            removed = set(repo) - set(fs)

            for name in removed:
                self.rqlexec('DELETE CWUniqueTogetherConstraint C WHERE C name %(name)s',
                             {'name': name})

            def possible_unique_constraint(cols):
                # a unique-together constraint may only bear on final or
                # inlined relations existing in the repository schema
                for name in cols:
                    rschema = repoeschema.subjrels.get(name)
                    if rschema is None:
                        print('dont add %s unique constraint on %s, missing %s' % (
                            ','.join(cols), eschema, name))
                        return False
                    if not (rschema.final or rschema.inlined):
                        print('dont add %s unique constraint on %s, %s is neither final nor inlined' % (
                            ','.join(cols), eschema, name))
                        return False
                return True

            for name in added:
                if possible_unique_constraint(fs[name]):
                    rql, substs = ss._uniquetogether2rql(eschema, fs[name])
                    substs['x'] = repoeschema.eid
                    substs['name'] = name
                    self.rqlexec(rql, substs)
|
572 |
|
    def _synchronize_rdef_schema(self, subjtype, rtype, objtype,
                                 syncperms=True, syncprops=True):
        """synchronize properties of the persistent relation definition schema
        against its current definition:
        * order and other properties
        * constraints
        * permissions
        """
        subjtype, objtype = str(subjtype), str(objtype)
        rschema = self.fs_schema.rschema(rtype)
        if rschema.rule:
            raise ExecutionError('Cannot synchronize a relation definition for a '
                                 'computed relation (%s)' % rschema)
        reporschema = self.repo.schema.rschema(rschema)
        if (subjtype, rschema, objtype) in self._synchronized:
            return
        # only mark as done when a full synchronization was requested
        if syncperms and syncprops:
            self._synchronized.add((subjtype, rschema, objtype))
            if rschema.symmetric:
                # the reversed rdef of a symmetric relation is the same one
                self._synchronized.add((objtype, rschema, subjtype))
        rdef = rschema.rdef(subjtype, objtype)
        if rdef.infered:
            return # don't try to synchronize infered relation defs
        repordef = reporschema.rdef(subjtype, objtype)
        # only ask for confirmation at verbosity >= 2
        confirm = self.verbosity >= 2
        if syncprops:
            # properties
            self.rqlexecall(ss.updaterdef2rql(rdef, repordef.eid),
                            ask_confirm=confirm)
            # constraints
            # 0. eliminate the set of unmodified constraints from the sets of
            #    old/new constraints
            newconstraints = set(rdef.constraints)
            oldconstraints = set(repordef.constraints)
            unchanged_constraints = newconstraints & oldconstraints
            newconstraints -= unchanged_constraints
            oldconstraints -= unchanged_constraints
            # 1. remove old constraints and update constraints of the same type
            # NOTE: don't use rschema.constraint_by_type because it may be
            #       out of sync with newconstraints when multiple
            #       constraints of the same type are used
            for cstr in oldconstraints:
                self.rqlexec('DELETE CWConstraint C WHERE C eid %(x)s',
                             {'x': cstr.eid}, ask_confirm=confirm)
            # 2. add new constraints
            cstrtype_map = self.cstrtype_mapping()
            self.rqlexecall(ss.constraints2rql(cstrtype_map, newconstraints,
                                               repordef.eid),
                            ask_confirm=confirm)
        if syncperms and not rschema in VIRTUAL_RTYPES:
            self._synchronize_permissions(rdef, repordef.eid)
|
624 |
|
625 # base actions ############################################################ |
|
626 |
|
627 def checkpoint(self, ask_confirm=True): |
|
628 """checkpoint action""" |
|
629 if not ask_confirm or self.confirm('Commit now ?', shell=False): |
|
630 self.commit() |
|
631 |
|
632 def cmd_add_cube(self, cube, update_database=True): |
|
633 self.cmd_add_cubes( (cube,), update_database) |
|
634 |
|
635 def cmd_add_cubes(self, cubes, update_database=True): |
|
636 """update_database is telling if the database schema should be updated |
|
637 or if only the relevant eproperty should be inserted (for the case where |
|
638 a cube has been extracted from an existing instance, so the |
|
639 cube schema is already in there) |
|
640 """ |
|
641 newcubes = super(ServerMigrationHelper, self).cmd_add_cubes(cubes) |
|
642 if not newcubes: |
|
643 return |
|
644 for cube in newcubes: |
|
645 self.cmd_set_property('system.version.'+cube, |
|
646 self.config.cube_version(cube)) |
|
647 # ensure added cube is in config cubes |
|
648 # XXX worth restoring on error? |
|
649 if not cube in self.config._cubes: |
|
650 self.config._cubes += (cube,) |
|
651 if not update_database: |
|
652 self.commit() |
|
653 return |
|
654 newcubes_schema = self.config.load_schema(construction_mode='non-strict') |
|
655 # XXX we have to replace fs_schema, used in cmd_add_relation_type |
|
656 # etc. and fsschema of migration script contexts |
|
657 self.fs_schema = newcubes_schema |
|
658 self.update_context('fsschema', self.fs_schema) |
|
659 new = set() |
|
660 # execute pre-create files |
|
661 driver = self.repo.system_source.dbdriver |
|
662 for cube in reversed(newcubes): |
|
663 self.cmd_install_custom_sql_scripts(cube) |
|
664 self.cmd_exec_event_script('precreate', cube) |
|
665 # add new entity and relation types |
|
666 for rschema in newcubes_schema.relations(): |
|
667 if not rschema in self.repo.schema: |
|
668 self.cmd_add_relation_type(rschema.type) |
|
669 new.add(rschema.type) |
|
670 toadd = [eschema for eschema in newcubes_schema.entities() |
|
671 if not eschema in self.repo.schema] |
|
672 for eschema in order_eschemas(toadd): |
|
673 self.cmd_add_entity_type(eschema.type) |
|
674 new.add(eschema.type) |
|
675 # check if attributes has been added to existing entities |
|
676 for rschema in newcubes_schema.relations(): |
|
677 existingschema = self.repo.schema.rschema(rschema.type) |
|
678 for (fromtype, totype) in rschema.rdefs: |
|
679 # if rdef already exists or is infered from inheritance, |
|
680 # don't add it |
|
681 if (fromtype, totype) in existingschema.rdefs \ |
|
682 or rschema.rdefs[(fromtype, totype)].infered: |
|
683 continue |
|
684 # check we should actually add the relation definition |
|
685 if not (fromtype in new or totype in new or rschema in new): |
|
686 continue |
|
687 self.cmd_add_relation_definition(str(fromtype), rschema.type, |
|
688 str(totype)) |
|
689 # execute post-create files |
|
690 for cube in reversed(newcubes): |
|
691 with self.cnx.allow_all_hooks_but(): |
|
692 self.cmd_exec_event_script('postcreate', cube) |
|
693 self.commit() |
|
694 |
|
    def cmd_drop_cube(self, cube, removedeps=False):
        """remove a cube from the instance: drop its entity/relation types and
        definitions no longer provided by the remaining cubes, run its
        pre/post-remove scripts and delete its version property.
        """
        removedcubes = super(ServerMigrationHelper, self).cmd_drop_cube(
            cube, removedeps)
        if not removedcubes:
            return
        fsschema = self.fs_schema
        # schema without the removed cubes
        removedcubes_schema = self.config.load_schema(construction_mode='non-strict')
        reposchema = self.repo.schema
        # execute pre-remove files
        for cube in reversed(removedcubes):
            self.cmd_exec_event_script('preremove', cube)
        # remove cubes'entity and relation types
        for rschema in fsschema.relations():
            if not rschema in removedcubes_schema and rschema in reposchema:
                self.cmd_drop_relation_type(rschema.type)
        toremove = [eschema for eschema in fsschema.entities()
                    if not eschema in removedcubes_schema
                    and eschema in reposchema]
        # drop in reverse dependency order
        for eschema in reversed(order_eschemas(toremove)):
            self.cmd_drop_entity_type(eschema.type)
        for rschema in fsschema.relations():
            if rschema in removedcubes_schema and rschema in reposchema:
                # check if attributes/relations has been added to entities from
                # other cubes
                for fromtype, totype in rschema.rdefs:
                    if (fromtype, totype) not in removedcubes_schema[rschema.type].rdefs and \
                       (fromtype, totype) in reposchema[rschema.type].rdefs:
                        self.cmd_drop_relation_definition(
                            str(fromtype), rschema.type, str(totype))
        # execute post-remove files
        for cube in reversed(removedcubes):
            self.cmd_exec_event_script('postremove', cube)
            self.rqlexec('DELETE CWProperty X WHERE X pkey %(pk)s',
                         {'pk': u'system.version.'+cube}, ask_confirm=False)
            self.commit()
|
730 |
|
731 # schema migration actions ################################################ |
|
732 |
|
733 def cmd_add_attribute(self, etype, attrname, attrtype=None, commit=True): |
|
734 """add a new attribute on the given entity type""" |
|
735 if attrtype is None: |
|
736 rschema = self.fs_schema.rschema(attrname) |
|
737 attrtype = rschema.objects(etype)[0] |
|
738 self.cmd_add_relation_definition(etype, attrname, attrtype, commit=commit) |
|
739 |
|
740 def cmd_drop_attribute(self, etype, attrname, commit=True): |
|
741 """drop an existing attribute from the given entity type |
|
742 |
|
743 `attrname` is a string giving the name of the attribute to drop |
|
744 """ |
|
745 try: |
|
746 rschema = self.repo.schema.rschema(attrname) |
|
747 attrtype = rschema.objects(etype)[0] |
|
748 except KeyError: |
|
749 print('warning: attribute %s %s is not known, skip deletion' % ( |
|
750 etype, attrname)) |
|
751 else: |
|
752 self.cmd_drop_relation_definition(etype, attrname, attrtype, |
|
753 commit=commit) |
|
754 |
|
    def cmd_rename_attribute(self, etype, oldname, newname, commit=True):
        """rename an existing attribute of the given entity type

        `oldname` is a string giving the name of the existing attribute
        `newname` is a string giving the name of the renamed attribute

        The new attribute is added, values are copied over with RQL, then
        the old attribute is dropped.
        """
        eschema = self.fs_schema.eschema(etype)
        attrtype = eschema.destination(newname)
        # have to commit this first step anyway to get the definition
        # actually in the schema
        self.cmd_add_attribute(etype, newname, attrtype, commit=True)
        # skip NULL values if the attribute is required
        rql = 'SET X %s VAL WHERE X is %s, X %s VAL' % (newname, etype, oldname)
        card = eschema.rdef(newname).cardinality[0]
        if card == '1':
            rql += ', NOT X %s NULL' % oldname
        self.rqlexec(rql, ask_confirm=self.verbosity>=2)
        # XXX if both attributes fulltext indexed, should skip fti rebuild
        # XXX if old attribute was fti indexed but not the new one old value
        # won't be removed from the index (this occurs on other kind of
        # fulltextindexed change...)
        self.cmd_drop_attribute(etype, oldname, commit=commit)
|
777 |
|
    def cmd_add_entity_type(self, etype, auto=True, commit=True):
        """register a new entity type

        in auto mode, automatically register entity's relation where the
        targeted type is known

        :param etype: name of the entity type, as found in the fs schema
        :param auto: when true, also register (non-meta, non-final) relations
          from the fs schema whose other end is already known by the instance
          schema
        :param commit: commit the underlying transaction when done
        """
        instschema = self.repo.schema
        eschema = self.fs_schema.eschema(etype)
        # final types may appear in the instance schema with eid None when not
        # yet serialized; only skip addition for genuinely known types
        if etype in instschema and not (eschema.final and eschema.eid is None):
            print('warning: %s already known, skip addition' % etype)
            return
        confirm = self.verbosity >= 2
        groupmap = self.group_mapping()
        cstrtypemap = self.cstrtype_mapping()
        # register the entity into CWEType
        execute = self.cnx.execute
        if eschema.final and eschema not in instschema:
            # final types are expected to be in the living schema by default, but they are not if
            # the type is defined in a cube that is being added
            edef = EntityType(eschema.type, __permissions__=eschema.permissions)
            instschema.add_entity_type(edef)
        ss.execschemarql(execute, eschema, ss.eschema2rql(eschema, groupmap))
        # add specializes relation if needed
        specialized = eschema.specializes()
        if specialized:
            try:
                specialized.eid = instschema[specialized].eid
            except KeyError:
                raise ExecutionError('trying to add entity type but parent type is '
                                     'not yet in the database schema')
            self.rqlexecall(ss.eschemaspecialize2rql(eschema), ask_confirm=confirm)
        # register entity's attributes
        for rschema, attrschema in eschema.attribute_definitions():
            # ignore those meta relations, they will be automatically added
            if rschema.type in META_RTYPES:
                continue
            if not attrschema.type in instschema:
                # the attribute's final type must be registered first
                self.cmd_add_entity_type(attrschema.type, False, False)
            if not rschema.type in instschema:
                # need to add the relation type and to commit to get it
                # actually in the schema
                self.cmd_add_relation_type(rschema.type, False, commit=True)
            # register relation definition
            rdef = self._get_rdef(rschema, eschema, eschema.destination(rschema))
            ss.execschemarql(execute, rdef, ss.rdef2rql(rdef, cstrtypemap, groupmap),)
        # take care to newly introduced base class
        # XXX some part of this should probably be under the "if auto" block
        for spschema in eschema.specialized_by(recursive=False):
            try:
                instspschema = instschema[spschema]
            except KeyError:
                # specialized entity type not in schema, ignore
                continue
            if instspschema.specializes() != eschema:
                self.rqlexec('SET D specializes P WHERE D eid %(d)s, P name %(pn)s',
                             {'d': instspschema.eid, 'pn': eschema.type},
                             ask_confirm=confirm)
                # propagate the parent's relation definitions to the newly
                # attached specialized type when not already present
                for rschema, tschemas, role in spschema.relation_definitions(True):
                    for tschema in tschemas:
                        if not tschema in instschema:
                            continue
                        if role == 'subject':
                            subjschema = spschema
                            objschema = tschema
                            if rschema.final and rschema in instspschema.subjrels:
                                # attribute already set, has_rdef would check if
                                # it's of the same type, we don't want this so
                                # simply skip here
                                continue
                        elif role == 'object':
                            subjschema = tschema
                            objschema = spschema
                        if (rschema.rdef(subjschema, objschema).infered
                            or (instschema.has_relation(rschema) and
                                (subjschema, objschema) in instschema[rschema].rdefs)):
                            continue
                        self.cmd_add_relation_definition(
                            subjschema.type, rschema.type, objschema.type)
        if auto:
            # we have commit here to get relation types actually in the schema
            self.commit()
            added = []
            for rschema in eschema.subject_relations():
                # attribute relation have already been processed and
                # 'owned_by'/'created_by' will be automatically added
                if rschema.final or rschema.type in META_RTYPES:
                    continue
                rtypeadded = rschema.type in instschema
                for targetschema in rschema.objects(etype):
                    # ignore relations where the targeted type is not in the
                    # current instance schema
                    targettype = targetschema.type
                    if not targettype in instschema and targettype != etype:
                        continue
                    if not rtypeadded:
                        # need to add the relation type and to commit to get it
                        # actually in the schema
                        added.append(rschema.type)
                        self.cmd_add_relation_type(rschema.type, False, commit=True)
                        rtypeadded = True
                    # register relation definition
                    # remember this to avoid adding twice non symmetric relation
                    # such as "Emailthread forked_from Emailthread"
                    added.append((etype, rschema.type, targettype))
                    rdef = self._get_rdef(rschema, eschema, targetschema)
                    ss.execschemarql(execute, rdef,
                                     ss.rdef2rql(rdef, cstrtypemap, groupmap))
            for rschema in eschema.object_relations():
                if rschema.type in META_RTYPES:
                    continue
                rtypeadded = rschema.type in instschema or rschema.type in added
                for targetschema in rschema.subjects(etype):
                    # ignore relations where the targeted type is not in the
                    # current instance schema
                    targettype = targetschema.type
                    # don't check targettype != etype since in this case the
                    # relation has already been added as a subject relation
                    if not targettype in instschema:
                        continue
                    if not rtypeadded:
                        # need to add the relation type and to commit to get it
                        # actually in the schema
                        self.cmd_add_relation_type(rschema.type, False, commit=True)
                        rtypeadded = True
                    elif (targettype, rschema.type, etype) in added:
                        # already registered from the subject side above
                        continue
                    # register relation definition
                    rdef = self._get_rdef(rschema, targetschema, eschema)
                    ss.execschemarql(execute, rdef,
                                     ss.rdef2rql(rdef, cstrtypemap, groupmap))
        if commit:
            self.commit()
|
910 |
|
911 def cmd_drop_entity_type(self, etype, commit=True): |
|
912 """Drop an existing entity type. |
|
913 |
|
914 This will trigger deletion of necessary relation types and definitions. |
|
915 Note that existing entities of the given type will be deleted without |
|
916 any hooks called. |
|
917 """ |
|
918 # XXX what if we delete an entity type which is specialized by other types |
|
919 # unregister the entity from CWEType |
|
920 self.rqlexec('DELETE CWEType X WHERE X name %(etype)s', {'etype': etype}, |
|
921 ask_confirm=self.verbosity>=2) |
|
922 if commit: |
|
923 self.commit() |
|
924 |
|
    def cmd_rename_entity_type(self, oldname, newname, attrs=None, commit=True):
        """rename an existing entity type in the persistent schema

        `oldname` is a string giving the name of the existing entity type
        `newname` is a string giving the name of the renamed entity type

        When `newname` already exists in the schema, entities of `oldname` are
        *merged* into the new type; `attrs` may then give the attribute names
        to copy over (defaulting to all final/inlined relations of the new
        type). Otherwise the type is simply renamed in place.
        """
        schema = self.repo.schema
        if oldname not in schema:
            print('warning: entity type %s is unknown, skip renaming' % oldname)
            return
        # if merging two existing entity types
        if newname in schema:
            # merging is only supported for types registered in ETYPE_NAME_MAP
            assert oldname in ETYPE_NAME_MAP, \
                '%s should be mapped to %s in ETYPE_NAME_MAP' % (oldname,
                                                                 newname)
            if attrs is None:
                attrs = ','.join(SQL_PREFIX + rschema.type
                                 for rschema in schema[newname].subject_relations()
                                 if (rschema.final or rschema.inlined)
                                 and not rschema in PURE_VIRTUAL_RTYPES)
            else:
                attrs += ('eid', 'creation_date', 'modification_date', 'cwuri')
                attrs = ','.join(SQL_PREFIX + attr for attr in attrs)
            # copy rows from the old type's table into the new type's table
            self.sqlexec('INSERT INTO %s%s(%s) SELECT %s FROM %s%s' % (
                SQL_PREFIX, newname, attrs, attrs, SQL_PREFIX, oldname),
                         ask_confirm=False)
            # old entity type has not been added to the schema, can't gather it
            new = schema.eschema(newname)
            oldeid = self.rqlexec('CWEType ET WHERE ET name %(on)s',
                                  {'on': oldname}, ask_confirm=False)[0][0]
            # backport old type relations to new type
            # XXX workflows, other relations?
            for r1, rr1 in [('from_entity', 'to_entity'),
                            ('to_entity', 'from_entity')]:
                self.rqlexec('SET X %(r1)s NET WHERE X %(r1)s OET, '
                             'NOT EXISTS(X2 %(r1)s NET, X relation_type XRT, '
                             'X2 relation_type XRT, X %(rr1)s XTE, X2 %(rr1)s XTE), '
                             'OET eid %%(o)s, NET eid %%(n)s' % locals(),
                             {'o': oldeid, 'n': new.eid}, ask_confirm=False)
            # backport is / is_instance_of relation to new type
            for rtype in ('is', 'is_instance_of'):
                self.sqlexec('UPDATE %s_relation SET eid_to=%s WHERE eid_to=%s'
                             % (rtype, new.eid, oldeid), ask_confirm=False)
            # delete relations using SQL to avoid relations content removal
            # triggered by schema synchronization hooks.
            for rdeftype in ('CWRelation', 'CWAttribute'):
                thispending = set( (eid for eid, in self.sqlexec(
                    'SELECT cw_eid FROM cw_%s WHERE cw_from_entity=%%(eid)s OR '
                    ' cw_to_entity=%%(eid)s' % rdeftype,
                    {'eid': oldeid}, ask_confirm=False)) )
                # we should add deleted eids into pending eids else we may
                # get some validation error on commit since integrity hooks
                # may think some required relation is missing... This also
                # ensures repository caches are properly cleaned up
                hook.CleanupDeletedEidsCacheOp.get_instance(self.cnx).union(thispending)
                # and don't forget to remove record from system tables
                entities = [self.cnx.entity_from_eid(eid, rdeftype) for eid in thispending]
                self.repo.system_source.delete_info_multi(self.cnx, entities)
                self.sqlexec('DELETE FROM cw_%s WHERE cw_from_entity=%%(eid)s OR '
                             'cw_to_entity=%%(eid)s' % rdeftype,
                             {'eid': oldeid}, ask_confirm=False)
                # now we have to manually cleanup relations pointing to deleted
                # entities
                thiseids = ','.join(str(eid) for eid in thispending)
                for rschema, ttypes, role in schema[rdeftype].relation_definitions():
                    if rschema.type in VIRTUAL_RTYPES:
                        continue
                    sqls = []
                    if role == 'object':
                        if rschema.inlined:
                            for eschema in ttypes:
                                sqls.append('DELETE FROM cw_%s WHERE cw_%s IN(%%s)'
                                            % (eschema, rschema))
                        else:
                            sqls.append('DELETE FROM %s_relation WHERE eid_to IN(%%s)'
                                        % rschema)
                    elif not rschema.inlined:
                        sqls.append('DELETE FROM %s_relation WHERE eid_from IN(%%s)'
                                    % rschema)
                    for sql in sqls:
                        self.sqlexec(sql % thiseids, ask_confirm=False)
            # remove the old type: use rql to propagate deletion
            self.rqlexec('DELETE CWEType ET WHERE ET name %(on)s', {'on': oldname},
                         ask_confirm=False)
        # elif simply renaming an entity type
        else:
            self.rqlexec('SET ET name %(newname)s WHERE ET is CWEType, ET name %(on)s',
                         {'newname' : text_type(newname), 'on' : oldname},
                         ask_confirm=False)
        if commit:
            self.commit()
|
1016 |
|
    def cmd_add_relation_type(self, rtype, addrdef=True, commit=True):
        """register a new relation type named `rtype`, as described in the
        schema description file.

        `addrdef` is a boolean value; when True, it will also add all relations
        of the type just added found in the schema definition file. Note that it
        implies an intermediate "commit" which commits the relation type
        creation (but not the relation definitions themselves, for which
        committing depends on the `commit` argument value).

        """
        reposchema = self.repo.schema
        rschema = self.fs_schema.rschema(rtype)
        execute = self.cnx.execute
        if rtype in reposchema:
            print('warning: relation type %s is already known, skip addition' % (
                rtype))
        elif rschema.rule:
            # computed relation: only its rewrite rule needs to be serialized
            gmap = self.group_mapping()
            ss.execschemarql(execute, rschema, ss.crschema2rql(rschema, gmap))
        else:
            # register the relation into CWRType and insert necessary relation
            # definitions
            ss.execschemarql(execute, rschema, ss.rschema2rql(rschema, addrdef=False))
        if not rschema.rule and addrdef:
            # commit so the relation type exists before adding definitions
            self.commit()
            gmap = self.group_mapping()
            cmap = self.cstrtype_mapping()
            done = set()
            for subj, obj in rschema.rdefs:
                # only add definitions between types known by the instance
                if not (reposchema.has_entity(subj)
                        and reposchema.has_entity(obj)):
                    continue
                # symmetric relations appears twice
                if (subj, obj) in done:
                    continue
                done.add( (subj, obj) )
                self.cmd_add_relation_definition(subj, rtype, obj)
            if rtype in META_RTYPES:
                # if the relation is in META_RTYPES, ensure we're adding it for
                # all entity types *in the persistent schema*, not only those in
                # the fs schema
                for etype in self.repo.schema.entities():
                    if not etype in self.fs_schema:
                        # get sample object type and rproperties
                        objtypes = rschema.objects()
                        assert len(objtypes) == 1, objtypes
                        objtype = objtypes[0]
                        # duplicate an existing rdef as a template for this etype
                        rdef = copy(rschema.rdef(rschema.subjects(objtype)[0], objtype))
                        rdef.subject = etype
                        rdef.rtype = self.repo.schema.rschema(rschema)
                        rdef.object = self.repo.schema.eschema(objtype)
                        ss.execschemarql(execute, rdef,
                                         ss.rdef2rql(rdef, cmap, gmap))
        if commit:
            self.commit()
|
1073 |
|
1074 def cmd_drop_relation_type(self, rtype, commit=True): |
|
1075 """Drop an existing relation type. |
|
1076 |
|
1077 Note that existing relations of the given type will be deleted without |
|
1078 any hooks called. |
|
1079 """ |
|
1080 self.rqlexec('DELETE CWRType X WHERE X name %r' % rtype, |
|
1081 ask_confirm=self.verbosity>=2) |
|
1082 self.rqlexec('DELETE CWComputedRType X WHERE X name %r' % rtype, |
|
1083 ask_confirm=self.verbosity>=2) |
|
1084 if commit: |
|
1085 self.commit() |
|
1086 |
|
1087 def cmd_rename_relation_type(self, oldname, newname, commit=True, force=False): |
|
1088 """rename an existing relation |
|
1089 |
|
1090 `oldname` is a string giving the name of the existing relation |
|
1091 `newname` is a string giving the name of the renamed relation |
|
1092 |
|
1093 If `force` is True, proceed even if `oldname` still appears in the fs schema |
|
1094 """ |
|
1095 if oldname in self.fs_schema and not force: |
|
1096 if not self.confirm('Relation %s is still present in the filesystem schema,' |
|
1097 ' do you really want to drop it?' % oldname, |
|
1098 default='n'): |
|
1099 return |
|
1100 self.cmd_add_relation_type(newname, commit=True) |
|
1101 if not self.repo.schema[oldname].rule: |
|
1102 self.rqlexec('SET X %s Y WHERE X %s Y' % (newname, oldname), |
|
1103 ask_confirm=self.verbosity>=2) |
|
1104 self.cmd_drop_relation_type(oldname, commit=commit) |
|
1105 |
|
1106 def cmd_add_relation_definition(self, subjtype, rtype, objtype, commit=True): |
|
1107 """register a new relation definition, from its definition found in the |
|
1108 schema definition file |
|
1109 """ |
|
1110 rschema = self.fs_schema.rschema(rtype) |
|
1111 if rschema.rule: |
|
1112 raise ExecutionError('Cannot add a relation definition for a ' |
|
1113 'computed relation (%s)' % rschema) |
|
1114 if not rtype in self.repo.schema: |
|
1115 self.cmd_add_relation_type(rtype, addrdef=False, commit=True) |
|
1116 if (subjtype, objtype) in self.repo.schema.rschema(rtype).rdefs: |
|
1117 print('warning: relation %s %s %s is already known, skip addition' % ( |
|
1118 subjtype, rtype, objtype)) |
|
1119 return |
|
1120 rdef = self._get_rdef(rschema, subjtype, objtype) |
|
1121 ss.execschemarql(self.cnx.execute, rdef, |
|
1122 ss.rdef2rql(rdef, self.cstrtype_mapping(), |
|
1123 self.group_mapping())) |
|
1124 if commit: |
|
1125 self.commit() |
|
1126 |
|
1127 def _get_rdef(self, rschema, subjtype, objtype): |
|
1128 return self._set_rdef_eid(rschema.rdefs[(subjtype, objtype)]) |
|
1129 |
|
1130 def _set_rdef_eid(self, rdef): |
|
1131 for attr in ('rtype', 'subject', 'object'): |
|
1132 schemaobj = getattr(rdef, attr) |
|
1133 if getattr(schemaobj, 'eid', None) is None: |
|
1134 schemaobj.eid = self.repo.schema[schemaobj].eid |
|
1135 assert schemaobj.eid is not None, schemaobj |
|
1136 return rdef |
|
1137 |
|
1138 def cmd_drop_relation_definition(self, subjtype, rtype, objtype, commit=True): |
|
1139 """Drop an existing relation definition. |
|
1140 |
|
1141 Note that existing relations of the given definition will be deleted |
|
1142 without any hooks called. |
|
1143 """ |
|
1144 rschema = self.repo.schema.rschema(rtype) |
|
1145 if rschema.rule: |
|
1146 raise ExecutionError('Cannot drop a relation definition for a ' |
|
1147 'computed relation (%s)' % rschema) |
|
1148 # unregister the definition from CWAttribute or CWRelation |
|
1149 if rschema.final: |
|
1150 etype = 'CWAttribute' |
|
1151 else: |
|
1152 etype = 'CWRelation' |
|
1153 rql = ('DELETE %s X WHERE X from_entity FE, FE name "%s",' |
|
1154 'X relation_type RT, RT name "%s", X to_entity TE, TE name "%s"') |
|
1155 self.rqlexec(rql % (etype, subjtype, rtype, objtype), |
|
1156 ask_confirm=self.verbosity>=2) |
|
1157 if commit: |
|
1158 self.commit() |
|
1159 |
|
    def cmd_sync_schema_props_perms(self, ertype=None, syncperms=True,
                                    syncprops=True, syncrdefs=True, commit=True):
        """synchronize the persistent schema against the current definition
        schema.

        `ertype` can be :
        - None, in that case everything will be synced ;
        - a string, it should be an entity type or
          a relation type. In that case, only the corresponding
          entities / relations will be synced ;
        - an rdef object to synchronize only this specific relation definition

        It will synch common stuff between the definition schema and the
        actual persistent schema, it won't add/remove any entity or relation.
        """
        assert syncperms or syncprops, 'nothing to do'
        if ertype is not None:
            # normalize an rdef object to its (subject, rtype, object) triple
            if isinstance(ertype, RelationDefinitionSchema):
                ertype = ertype.as_triple()
            if isinstance(ertype, (tuple, list)):
                assert len(ertype) == 3, 'not a relation definition'
                self._synchronize_rdef_schema(ertype[0], ertype[1], ertype[2],
                                              syncperms=syncperms,
                                              syncprops=syncprops)
            else:
                # a plain type name: dispatch on entity vs relation schema
                erschema = self.repo.schema[ertype]
                if isinstance(erschema, CubicWebRelationSchema):
                    self._synchronize_rschema(erschema, syncrdefs=syncrdefs,
                                              syncperms=syncperms,
                                              syncprops=syncprops)
                else:
                    self._synchronize_eschema(erschema, syncrdefs=syncrdefs,
                                              syncperms=syncperms,
                                              syncprops=syncprops)
        else:
            for etype in self.repo.schema.entities():
                if etype.eid is None:
                    # not yet added final etype (thing to BigInt defined in
                    # yams though 3.13 migration not done yet)
                    continue
                self._synchronize_eschema(etype, syncrdefs=syncrdefs,
                                          syncprops=syncprops, syncperms=syncperms)
        if commit:
            self.commit()
|
1204 |
|
1205 def cmd_change_relation_props(self, subjtype, rtype, objtype, |
|
1206 commit=True, **kwargs): |
|
1207 """change some properties of a relation definition |
|
1208 |
|
1209 you usually want to use sync_schema_props_perms instead. |
|
1210 """ |
|
1211 assert kwargs |
|
1212 restriction = [] |
|
1213 if subjtype and subjtype != 'Any': |
|
1214 restriction.append('X from_entity FE, FE name "%s"' % subjtype) |
|
1215 if objtype and objtype != 'Any': |
|
1216 restriction.append('X to_entity TE, TE name "%s"' % objtype) |
|
1217 if rtype and rtype != 'Any': |
|
1218 restriction.append('X relation_type RT, RT name "%s"' % rtype) |
|
1219 assert restriction |
|
1220 values = [] |
|
1221 for k, v in kwargs.items(): |
|
1222 values.append('X %s %%(%s)s' % (k, k)) |
|
1223 if PY2 and isinstance(v, str): |
|
1224 kwargs[k] = unicode(v) |
|
1225 rql = 'SET %s WHERE %s' % (','.join(values), ','.join(restriction)) |
|
1226 self.rqlexec(rql, kwargs, ask_confirm=self.verbosity>=2) |
|
1227 if commit: |
|
1228 self.commit() |
|
1229 |
|
1230 def cmd_set_size_constraint(self, etype, rtype, size, commit=True): |
|
1231 """set change size constraint of a string attribute |
|
1232 |
|
1233 if size is None any size constraint will be removed. |
|
1234 |
|
1235 you usually want to use sync_schema_props_perms instead. |
|
1236 """ |
|
1237 oldvalue = None |
|
1238 for constr in self.repo.schema.eschema(etype).rdef(rtype).constraints: |
|
1239 if isinstance(constr, SizeConstraint): |
|
1240 oldvalue = constr.max |
|
1241 if oldvalue == size: |
|
1242 return |
|
1243 if oldvalue is None and not size is None: |
|
1244 ceid = self.rqlexec('INSERT CWConstraint C: C value %(v)s, C cstrtype CT ' |
|
1245 'WHERE CT name "SizeConstraint"', |
|
1246 {'v': SizeConstraint(size).serialize()}, |
|
1247 ask_confirm=self.verbosity>=2)[0][0] |
|
1248 self.rqlexec('SET X constrained_by C WHERE X from_entity S, X relation_type R, ' |
|
1249 'S name "%s", R name "%s", C eid %s' % (etype, rtype, ceid), |
|
1250 ask_confirm=self.verbosity>=2) |
|
1251 elif not oldvalue is None: |
|
1252 if not size is None: |
|
1253 self.rqlexec('SET C value %%(v)s WHERE X from_entity S, X relation_type R,' |
|
1254 'X constrained_by C, C cstrtype CT, CT name "SizeConstraint",' |
|
1255 'S name "%s", R name "%s"' % (etype, rtype), |
|
1256 {'v': text_type(SizeConstraint(size).serialize())}, |
|
1257 ask_confirm=self.verbosity>=2) |
|
1258 else: |
|
1259 self.rqlexec('DELETE X constrained_by C WHERE X from_entity S, X relation_type R,' |
|
1260 'X constrained_by C, C cstrtype CT, CT name "SizeConstraint",' |
|
1261 'S name "%s", R name "%s"' % (etype, rtype), |
|
1262 ask_confirm=self.verbosity>=2) |
|
1263 # cleanup unused constraints |
|
1264 self.rqlexec('DELETE CWConstraint C WHERE NOT X constrained_by C') |
|
1265 if commit: |
|
1266 self.commit() |
|
1267 |
|
1268 # Workflows handling ###################################################### |
|
1269 |
|
1270 def cmd_make_workflowable(self, etype): |
|
1271 """add workflow relations to an entity type to make it workflowable""" |
|
1272 self.cmd_add_relation_definition(etype, 'in_state', 'State') |
|
1273 self.cmd_add_relation_definition(etype, 'custom_workflow', 'Workflow') |
|
1274 self.cmd_add_relation_definition('TrInfo', 'wf_info_for', etype) |
|
1275 |
|
1276 def cmd_add_workflow(self, name, wfof, default=True, commit=False, |
|
1277 ensure_workflowable=True, **kwargs): |
|
1278 """ |
|
1279 create a new workflow and links it to entity types |
|
1280 :type name: unicode |
|
1281 :param name: name of the workflow |
|
1282 |
|
1283 :type wfof: string or list/tuple of strings |
|
1284 :param wfof: entity type(s) having this workflow |
|
1285 |
|
1286 :type default: bool |
|
1287 :param default: tells wether this is the default workflow |
|
1288 for the specified entity type(s); set it to false in |
|
1289 the case of a subworkflow |
|
1290 |
|
1291 :rtype: `Workflow` |
|
1292 """ |
|
1293 wf = self.cmd_create_entity('Workflow', name=text_type(name), |
|
1294 **kwargs) |
|
1295 if not isinstance(wfof, (list, tuple)): |
|
1296 wfof = (wfof,) |
|
1297 def _missing_wf_rel(etype): |
|
1298 return 'missing workflow relations, see make_workflowable(%s)' % etype |
|
1299 for etype in wfof: |
|
1300 eschema = self.repo.schema[etype] |
|
1301 etype = text_type(etype) |
|
1302 if ensure_workflowable: |
|
1303 assert 'in_state' in eschema.subjrels, _missing_wf_rel(etype) |
|
1304 assert 'custom_workflow' in eschema.subjrels, _missing_wf_rel(etype) |
|
1305 assert 'wf_info_for' in eschema.objrels, _missing_wf_rel(etype) |
|
1306 rset = self.rqlexec( |
|
1307 'SET X workflow_of ET WHERE X eid %(x)s, ET name %(et)s', |
|
1308 {'x': wf.eid, 'et': text_type(etype)}, ask_confirm=False) |
|
1309 assert rset, 'unexistant entity type %s' % etype |
|
1310 if default: |
|
1311 self.rqlexec( |
|
1312 'SET ET default_workflow X WHERE X eid %(x)s, ET name %(et)s', |
|
1313 {'x': wf.eid, 'et': text_type(etype)}, ask_confirm=False) |
|
1314 if commit: |
|
1315 self.commit() |
|
1316 return wf |
|
1317 |
|
1318 def cmd_get_workflow_for(self, etype): |
|
1319 """return default workflow for the given entity type""" |
|
1320 rset = self.rqlexec('Workflow X WHERE ET default_workflow X, ET name %(et)s', |
|
1321 {'et': etype}) |
|
1322 return rset.get_entity(0, 0) |
|
1323 |
|
1324 # CWProperty handling ###################################################### |
|
1325 |
|
1326 def cmd_property_value(self, pkey): |
|
1327 """retreive the site-wide persistent property value for the given key. |
|
1328 |
|
1329 To get a user specific property value, use appropriate method on CWUser |
|
1330 instance. |
|
1331 """ |
|
1332 rset = self.rqlexec( |
|
1333 'Any V WHERE X is CWProperty, X pkey %(k)s, X value V, NOT X for_user U', |
|
1334 {'k': pkey}, ask_confirm=False) |
|
1335 return rset[0][0] |
|
1336 |
|
1337 def cmd_set_property(self, pkey, value): |
|
1338 """set the site-wide persistent property value for the given key to the |
|
1339 given value. |
|
1340 |
|
1341 To set a user specific property value, use appropriate method on CWUser |
|
1342 instance. |
|
1343 """ |
|
1344 value = text_type(value) |
|
1345 try: |
|
1346 prop = self.rqlexec( |
|
1347 'CWProperty X WHERE X pkey %(k)s, NOT X for_user U', |
|
1348 {'k': text_type(pkey)}, ask_confirm=False).get_entity(0, 0) |
|
1349 except Exception: |
|
1350 self.cmd_create_entity('CWProperty', pkey=text_type(pkey), value=value) |
|
1351 else: |
|
1352 prop.cw_set(value=value) |
|
1353 |
|
1354 # other data migration commands ########################################### |
|
1355 |
|
1356 def cmd_storage_changed(self, etype, attribute): |
|
1357 """migrate entities to a custom storage. The new storage is expected to |
|
1358 be set, it will be temporarily removed for the migration. |
|
1359 """ |
|
1360 from logilab.common.shellutils import ProgressBar |
|
1361 source = self.repo.system_source |
|
1362 storage = source.storage(etype, attribute) |
|
1363 source.unset_storage(etype, attribute) |
|
1364 rset = self.rqlexec('Any X WHERE X is %s' % etype, ask_confirm=False) |
|
1365 pb = ProgressBar(len(rset)) |
|
1366 for entity in rset.entities(): |
|
1367 # fill cache. Do not fetch that attribute using the global rql query |
|
1368 # since we may exhaust memory doing that.... |
|
1369 getattr(entity, attribute) |
|
1370 storage.migrate_entity(entity, attribute) |
|
1371 # remove from entity cache to avoid memory exhaustion |
|
1372 del entity.cw_attr_cache[attribute] |
|
1373 pb.update() |
|
1374 print() |
|
1375 source.set_storage(etype, attribute, storage) |
|
1376 |
|
1377 def cmd_create_entity(self, etype, commit=False, **kwargs): |
|
1378 """add a new entity of the given type""" |
|
1379 entity = self.cnx.create_entity(etype, **kwargs) |
|
1380 if commit: |
|
1381 self.commit() |
|
1382 return entity |
|
1383 |
|
    def cmd_find(self, etype, **kwargs):
        """Return a result set of entities of the given type matching the
        given attribute values (simple delegation to `cnx.find`).
        """
        return self.cnx.find(etype, **kwargs)
|
1387 |
|
    @deprecated("[3.19] use find(*args, **kwargs).entities() instead")
    def cmd_find_entities(self, etype, **kwargs):
        """find entities of the given type and attribute values

        Deprecated: use :meth:`cmd_find` and iterate `.entities()` instead.
        """
        return self.cnx.find(etype, **kwargs).entities()
|
1392 |
|
    @deprecated("[3.19] use find(*args, **kwargs).one() instead")
    def cmd_find_one_entity(self, etype, **kwargs):
        """find one entity of the given type and attribute values.

        raise :exc:`cubicweb.req.FindEntityError` if can not return one and only
        one entity.

        Deprecated: use :meth:`cmd_find` and call `.one()` instead.
        """
        return self.cnx.find(etype, **kwargs).one()
|
1401 |
|
1402 def cmd_update_etype_fti_weight(self, etype, weight): |
|
1403 if self.repo.system_source.dbdriver == 'postgres': |
|
1404 self.sqlexec('UPDATE appears SET weight=%(weight)s ' |
|
1405 'FROM entities as X ' |
|
1406 'WHERE X.eid=appears.uid AND X.type=%(type)s', |
|
1407 {'type': etype, 'weight': weight}, ask_confirm=False) |
|
1408 |
|
    def cmd_reindex_entities(self, etypes=None):
        """Force reindexation of entities of the given types, or of all
        indexable entity types when `etypes` is None.
        """
        from cubicweb.server.checkintegrity import reindex_entities
        reindex_entities(self.repo.schema, self.cnx, etypes=etypes)
|
1415 |
|
1416 @contextmanager |
|
1417 def cmd_dropped_constraints(self, etype, attrname, cstrtype=None, |
|
1418 droprequired=False): |
|
1419 """context manager to drop constraints temporarily on fs_schema |
|
1420 |
|
1421 `cstrtype` should be a constraint class (or a tuple of classes) |
|
1422 and will be passed to isinstance directly |
|
1423 |
|
1424 For instance:: |
|
1425 |
|
1426 >>> with dropped_constraints('MyType', 'myattr', |
|
1427 ... UniqueConstraint, droprequired=True): |
|
1428 ... add_attribute('MyType', 'myattr') |
|
1429 ... # + instructions to fill MyType.myattr column |
|
1430 ... |
|
1431 >>> |
|
1432 |
|
1433 """ |
|
1434 rdef = self.fs_schema.eschema(etype).rdef(attrname) |
|
1435 original_constraints = rdef.constraints |
|
1436 # remove constraints |
|
1437 if cstrtype: |
|
1438 rdef.constraints = [cstr for cstr in original_constraints |
|
1439 if not (cstrtype and isinstance(cstr, cstrtype))] |
|
1440 if droprequired: |
|
1441 original_cardinality = rdef.cardinality |
|
1442 rdef.cardinality = '?' + rdef.cardinality[1] |
|
1443 yield |
|
1444 # restore original constraints |
|
1445 rdef.constraints = original_constraints |
|
1446 if droprequired: |
|
1447 rdef.cardinality = original_cardinality |
|
1448 # update repository schema |
|
1449 self.cmd_sync_schema_props_perms(rdef, syncperms=False) |
|
1450 |
|
1451 def sqlexec(self, sql, args=None, ask_confirm=True): |
|
1452 """execute the given sql if confirmed |
|
1453 |
|
1454 should only be used for low level stuff undoable with existing higher |
|
1455 level actions |
|
1456 """ |
|
1457 if not ask_confirm or self.confirm('Execute sql: %s ?' % sql): |
|
1458 try: |
|
1459 cu = self.cnx.system_sql(sql, args) |
|
1460 except Exception: |
|
1461 ex = sys.exc_info()[1] |
|
1462 if self.confirm('Error: %s\nabort?' % ex, pdb=True): |
|
1463 raise |
|
1464 return |
|
1465 try: |
|
1466 return cu.fetchall() |
|
1467 except Exception: |
|
1468 # no result to fetch |
|
1469 return |
|
1470 |
|
1471 def rqlexec(self, rql, kwargs=None, build_descr=True, |
|
1472 ask_confirm=False): |
|
1473 """rql action""" |
|
1474 if not isinstance(rql, (tuple, list)): |
|
1475 rql = ( (rql, kwargs), ) |
|
1476 res = None |
|
1477 execute = self.cnx.execute |
|
1478 for rql, kwargs in rql: |
|
1479 if kwargs: |
|
1480 msg = '%s (%s)' % (rql, kwargs) |
|
1481 else: |
|
1482 msg = rql |
|
1483 if not ask_confirm or self.confirm('Execute rql: %s ?' % msg): |
|
1484 try: |
|
1485 res = execute(rql, kwargs, build_descr=build_descr) |
|
1486 except Exception as ex: |
|
1487 if self.confirm('Error: %s\nabort?' % ex, pdb=True): |
|
1488 raise |
|
1489 return res |
|
1490 |
|
    def rqliter(self, rql, kwargs=None, ask_confirm=True):
        """Return a lazy iterator over the results of `rql` (see
        `ForRqlIterator`), asking for confirmation by default.
        """
        return ForRqlIterator(self, rql, kwargs, ask_confirm)
|
1493 |
|
1494 # low-level commands to repair broken system database ###################### |
|
1495 |
|
    def cmd_change_attribute_type(self, etype, attr, newtype, commit=True):
        """low level method to change the type of an entity attribute. This is
        a quick hack which has some drawback:
        * only works when the old type can be changed to the new type by the
          underlying rdbms (eg using ALTER TABLE)
        * the actual schema won't be updated until next startup
        """
        rschema = self.repo.schema.rschema(attr)
        # current (old) object type of the attribute for this entity type
        oldschema = rschema.objects(etype)[0]
        rdef = rschema.rdef(etype, oldschema)
        # repoint the CWAttribute entity's to_entity to the new type's eid
        # NOTE(review): no space between ')' and 'WHERE' when the two literals
        # are concatenated -- still tokenizes as valid SQL, but fragile
        sql = ("UPDATE cw_CWAttribute "
               "SET cw_to_entity=(SELECT cw_eid FROM cw_CWEType WHERE cw_name='%s')"
               "WHERE cw_eid=%s") % (newtype, rdef.eid)
        self.sqlexec(sql, ask_confirm=False)
        dbhelper = self.repo.system_source.dbhelper
        # the target column type is taken from the file system schema
        newrdef = self.fs_schema.rschema(attr).rdef(etype, newtype)
        sqltype = sql_type(dbhelper, newrdef)
        cursor = self.cnx.cnxset.cu
        # consider former cardinality by design, since cardinality change is not handled here
        allownull = rdef.cardinality[0] != '1'
        dbhelper.change_col_type(cursor, 'cw_%s' % etype, 'cw_%s' % attr, sqltype, allownull)
        if commit:
            self.commit()
        # manually update live schema: drop the old (subject, object)
        # association and register the new one on the relation schema's
        # internal structures (relies on yams private attributes)
        eschema = self.repo.schema[etype]
        rschema._subj_schemas[eschema].remove(oldschema)
        rschema._obj_schemas[oldschema].remove(eschema)
        newschema = self.repo.schema[newtype]
        rschema._update(eschema, newschema)
        rdef.object = newschema
        del rschema.rdefs[(eschema, oldschema)]
        rschema.rdefs[(eschema, newschema)] = rdef
|
1528 |
|
1529 def cmd_add_entity_type_table(self, etype, commit=True): |
|
1530 """low level method to create the sql table for an existing entity. |
|
1531 This may be useful on accidental desync between the repository schema |
|
1532 and a sql database |
|
1533 """ |
|
1534 dbhelper = self.repo.system_source.dbhelper |
|
1535 tablesql = eschema2sql(dbhelper, self.repo.schema.eschema(etype), |
|
1536 prefix=SQL_PREFIX) |
|
1537 for sql in tablesql.split(';'): |
|
1538 if sql.strip(): |
|
1539 self.sqlexec(sql) |
|
1540 if commit: |
|
1541 self.commit() |
|
1542 |
|
1543 def cmd_add_relation_type_table(self, rtype, commit=True): |
|
1544 """low level method to create the sql table for an existing relation. |
|
1545 This may be useful on accidental desync between the repository schema |
|
1546 and a sql database |
|
1547 """ |
|
1548 tablesql = rschema2sql(self.repo.schema.rschema(rtype)) |
|
1549 for sql in tablesql.split(';'): |
|
1550 if sql.strip(): |
|
1551 self.sqlexec(sql) |
|
1552 if commit: |
|
1553 self.commit() |
|
1554 |
|
    @deprecated("[3.15] use rename_relation_type(oldname, newname)")
    def cmd_rename_relation(self, oldname, newname, commit=True):
        # backward compatibility wrapper, kept for pre-3.15 migration scripts;
        # simply delegates to cmd_rename_relation_type
        self.cmd_rename_relation_type(oldname, newname, commit)
|
1558 |
|
1559 |
|
class ForRqlIterator:
    """specific rql iterator to make the loop skipable"""

    def __init__(self, helper, rql, kwargs, ask_confirm):
        self._h = helper
        self.rql = rql
        self.kwargs = kwargs
        self.ask_confirm = ask_confirm
        # lazily created iterator over the result set
        self._rsetit = None

    def __iter__(self):
        return self

    def _get_rset(self):
        """execute the query (after optional confirmation) and return the
        result set; raises StopIteration to skip the loop on refusal"""
        rql, kwargs = self.rql, self.kwargs
        msg = '%s (%s)' % (rql, kwargs) if kwargs else rql
        if self.ask_confirm and not self._h.confirm('Execute rql: %s ?' % msg):
            raise StopIteration
        try:
            return self._h._cw.execute(rql, kwargs)
        except Exception as ex:
            if self._h.confirm('Error: %s\nabort?' % ex):
                raise
            raise StopIteration

    def __next__(self):
        # execute on first access only, then drain the result set iterator
        if self._rsetit is None:
            self._rsetit = iter(self._get_rset())
        return next(self._rsetit)

    next = __next__  # python 2 iterator protocol

    def entities(self):
        """return the query's entities, or an empty list when execution
        was skipped"""
        try:
            rset = self._get_rset()
        except StopIteration:
            return []
        return rset.entities()
|