author:       Sylvain Thénault <sylvain.thenault@logilab.fr>
date:         Fri, 05 Jun 2009 15:06:55 +0200
changeset:    2057:0a0cbccafcb5
parent:       1977:606923dff11b
child:        2101:08003e0354a7
permissions:  -rw-r--r--
"""Defines the central class for the CubicWeb RQL server: the repository.

The repository is an abstraction allowing execution of rql queries against
data sources. Most of the work is actually done in helper classes. The
repository mainly:

* brings these classes all together to provide a single access
  point to a cubicweb application.
* handles session management
* provides methods for pyro registration, to call if pyro is enabled


:organization: Logilab
:copyright: 2001-2009 LOGILAB S.A. (Paris, FRANCE), license is LGPL v2.
:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
:license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
"""
__docformat__ = "restructuredtext en"

import sys
import Queue
from os.path import join, exists
from datetime import datetime
from time import time, localtime, strftime

from logilab.common.decorators import cached

from yams import BadSchemaDefinition
from rql import RQLSyntaxError

from cubicweb import (CW_SOFTWARE_ROOT, UnknownEid, AuthenticationError,
                      ETypeNotSupportedBySources, RTypeNotSupportedBySources,
                      BadConnectionId, Unauthorized, ValidationError,
                      ExecutionError, typed_eid,
                      CW_MIGRATION_MAP)
from cubicweb.cwvreg import CubicWebRegistry
from cubicweb.schema import CubicWebSchema

from cubicweb.server.utils import RepoThread, LoopTask
from cubicweb.server.pool import ConnectionsPool, LateOperation, SingleLastOperation
from cubicweb.server.session import Session, InternalSession
from cubicweb.server.querier import QuerierHelper
from cubicweb.server.sources import get_source
from cubicweb.server.hooksmanager import HooksManager
from cubicweb.server.hookhelper import rproperty

class CleanupEidTypeCacheOp(SingleLastOperation):
    """on rollback of an insert query or commit of a delete query, we have to
    clear the repository's cache of entries which are no longer valid

    NOTE: the querier's rqlst/solutions cache may have been polluted too with
    queries such as Any X WHERE X eid 32 if 32 has been rollbacked, however
    generated queries are unpredictable and analysing the whole cache is
    probably too expensive. Notice that there is no problem when using args to
    specify eids instead of giving them into the rql string.
    """

    def commit_event(self):
        """the observed connections pool has been committed,
        remove deleted eids from the repository type/source cache
        """
        self.repo.clear_caches(self.session.query_data('pendingeids', ()))

    def rollback_event(self):
        """the observed connections pool has been rollbacked,
        remove inserted eids from the repository type/source cache
        """
        self.repo.clear_caches(self.session.query_data('neweids', ()))


class FTIndexEntityOp(LateOperation):
    """operation to delay entity full text indexation to commit

    since fti indexing may trigger discovery of other entities, it should be
    triggered on precommit, not commit, and this should be done after other
    precommit operations which may add relations to the entity
    """

    def precommit_event(self):
        session = self.session
        entity = self.entity
        if entity.eid in session.query_data('pendingeids', ()):
            return # entity added and deleted in the same transaction
        session.repo.system_source.fti_unindex_entity(session, entity.eid)
        for container in entity.fti_containers():
            session.repo.index_entity(session, container)

    def commit_event(self):
        pass

def del_existing_rel_if_needed(session, eidfrom, rtype, eidto):
    """delete an existing relation when adding a new one if cardinality is 1 or ?

    this has to be done once the new relation has been inserted, to avoid
    having an entity without the relation for some time

    this kind of behaviour has to be done in the repository so we don't have
    hooks ordering hazards
    """
    # skip delete queries (only?) if session is an internal session. It is the
    # hooks' responsibility to ensure they do not violate the relation's cardinality
    if session.is_super_session:
        return
    card = rproperty(session, rtype, eidfrom, eidto, 'cardinality')
    # one may be tempted to check for neweids but this may cause more than one
    # relation even with '1?' cardinality if those relations are added in the
    # same transaction where the entity is being created. This never occurs from
    # the web interface but may occur during tests or dbapi connections (though
    # not expected for this). So: don't do it, we pretend to ensure repository
    # consistency.
    # XXX should probably not use unsafe_execute!
    if card[0] in '1?':
        rschema = session.repo.schema.rschema(rtype)
        if not rschema.inlined:
            session.unsafe_execute(
                'DELETE X %s Y WHERE X eid %%(x)s, NOT Y eid %%(y)s' % rtype,
                {'x': eidfrom, 'y': eidto}, 'x')
    if card[1] in '1?':
        session.unsafe_execute(
            'DELETE X %s Y WHERE NOT X eid %%(x)s, Y eid %%(y)s' % rtype,
            {'x': eidfrom, 'y': eidto}, 'y')

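# Illustrative sketch (not part of the original source): for a relation type
# whose subject cardinality is '1' or '?', adding a second relation from the
# same subject is expected to discard the previous link. The relation name and
# eids below are hypothetical.
#
#   del_existing_rel_if_needed(session, 123, 'in_state', 456)
#   # roughly boils down to:
#   # session.unsafe_execute(
#   #     'DELETE X in_state Y WHERE X eid %(x)s, NOT Y eid %(y)s',
#   #     {'x': 123, 'y': 456}, 'x')
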
class Repository(object):
    """a repository provides access to a set of persistent storages for
    entities and relations

    XXX protect pyro access
    """

    def __init__(self, config, vreg=None, debug=False):
        self.config = config
        if vreg is None:
            vreg = CubicWebRegistry(config, debug)
        self.vreg = vreg
        self.pyro_registered = False
        self.info('starting repository from %s', self.config.apphome)
        # dictionary of opened sessions
        self._sessions = {}
        # list of functions to be called at regular interval
        self._looping_tasks = []
        # list of running threads
        self._running_threads = []
        # initial schema, should be built or replaced later
        self.schema = CubicWebSchema(config.appid)
        # querier helper, needs to be created after sources initialization
        self.querier = QuerierHelper(self, self.schema)
        # should we reindex on changes?
        self.do_fti = not config['delay-full-text-indexation']
        # sources
        self.sources = []
        self.sources_by_uri = {}
        # FIXME: store additional sources info in the system database ?
        # FIXME: sources should be ordered (add_entity priority)
        for uri, source_config in config.sources().items():
            if uri == 'admin':
                # not an actual source
                continue
            source = self.get_source(uri, source_config)
            self.sources_by_uri[uri] = source
            self.sources.append(source)
        self.system_source = self.sources_by_uri['system']
        # ensure system source is the first one
        self.sources.remove(self.system_source)
        self.sources.insert(0, self.system_source)
        # cache eid -> type / source
        self._type_source_cache = {}
        # cache (extid, source uri) -> eid
        self._extid_cache = {}
        # create the hooks manager
        self.hm = HooksManager(self.schema)
        # open some connections pools
        self._available_pools = Queue.Queue()
        self._available_pools.put_nowait(ConnectionsPool(self.sources))
        if config.read_application_schema:
            # normal start: load the application schema from the database
            self.fill_schema()
        elif config.bootstrap_schema:
            # usually during repository creation
            self.warning("set fs application's schema as bootstrap schema")
            config.bootstrap_cubes()
            self.set_bootstrap_schema(self.config.load_schema())
            # need to load the Any and CWUser entity types
            self.vreg.schema = self.schema
            etdirectory = join(CW_SOFTWARE_ROOT, 'entities')
            self.vreg.init_registration([etdirectory])
            self.vreg.load_file(join(etdirectory, '__init__.py'),
                                'cubicweb.entities.__init__')
            self.vreg.load_file(join(etdirectory, 'authobjs.py'),
                                'cubicweb.entities.authobjs')
        else:
            # test start: use the file system schema (quicker)
            self.warning("set fs application's schema")
            config.bootstrap_cubes()
            self.set_schema(self.config.load_schema())
        if not config.creating:
            if 'CWProperty' in self.schema:
                self.vreg.init_properties(self.properties())
            # call sources' init method to complete their initialisation if
            # needed (for instance looking for persistent configuration using an
            # internal session, which is not possible until pools have been
            # initialized)
            for source in self.sources:
                source.init()
            # call application level initialisation hooks
            self.hm.call_hooks('server_startup', repo=self)
            # register a task to cleanup expired sessions
            self.looping_task(self.config['session-time']/3.,
                              self.clean_sessions)
        else:
            # call init_creating so for instance the native source can configure
            # tsearch according to the postgres version
            for source in self.sources:
                source.init_creating()
        # close initialization pool and reopen fresh ones for proper
        # initialization now that we know the cubes
        self._get_pool().close(True)
        for i in xrange(config['connections-pool-size']):
            self._available_pools.put_nowait(ConnectionsPool(self.sources))
        self._shutting_down = False

    # internals ###############################################################

    def get_source(self, uri, source_config):
        source_config['uri'] = uri
        return get_source(source_config, self.schema, self)

    def set_schema(self, schema, resetvreg=True):
        schema.rebuild_infered_relations()
        self.info('set schema %s %#x', schema.name, id(schema))
        self.debug(', '.join(sorted(str(e) for e in schema.entities())))
        self.querier.set_schema(schema)
        for source in self.sources:
            source.set_schema(schema)
        self.schema = schema
        if resetvreg:
            # full reload of all appobjects
            self.vreg.reset()
        self.vreg.set_schema(schema)
        self.hm.set_schema(schema)
        self.hm.register_system_hooks(self.config)
        # application specific hooks
        if self.config.application_hooks:
            self.info('loading application hooks')
            self.hm.register_hooks(self.config.load_hooks(self.vreg))

    def fill_schema(self):
        """load the schema from the repository"""
        from cubicweb.server.schemaserial import deserialize_schema
        self.info('loading schema from the repository')
        appschema = CubicWebSchema(self.config.appid)
        self.set_bootstrap_schema(self.config.load_bootstrap_schema())
        self.debug('deserializing db schema into %s %#x', appschema.name, id(appschema))
        session = self.internal_session()
        try:
            try:
                deserialize_schema(appschema, session)
            except BadSchemaDefinition:
                raise
            except Exception, ex:
                import traceback
                traceback.print_exc()
                raise Exception('Is the database initialised ? (cause: %s)' %
                                (ex.args and ex.args[0].strip() or 'unknown')), \
                                None, sys.exc_info()[-1]
            self.info('set the actual schema')
            # XXX have to do this since CWProperty isn't in the bootstrap schema,
            # it'll be redone in set_schema
            self.set_bootstrap_schema(appschema)
            # 2.49 migration
            if exists(join(self.config.apphome, 'vc.conf')):
                session.set_pool()
                if not 'template' in file(join(self.config.apphome, 'vc.conf')).read():
                    # remaining from cubicweb < 2.38...
                    session.execute('DELETE CWProperty X WHERE X pkey "system.version.template"')
                    session.commit()
        finally:
            session.close()
        self.config.init_cubes(self.get_cubes())
        self.set_schema(appschema)

    def set_bootstrap_schema(self, schema):
        """disable hooks when setting a bootstrap schema, but restore
        the configuration for the next time
        """
        config = self.config
        # XXX refactor
        config.core_hooks = False
        config.usergroup_hooks = False
        config.schema_hooks = False
        config.notification_hooks = False
        config.application_hooks = False
        self.set_schema(schema, resetvreg=False)
        config.core_hooks = True
        config.usergroup_hooks = True
        config.schema_hooks = True
        config.notification_hooks = True
        config.application_hooks = True

    def start_looping_tasks(self):
        assert isinstance(self._looping_tasks, list), 'already started'
        for i, (interval, func) in enumerate(self._looping_tasks):
            self._looping_tasks[i] = task = LoopTask(interval, func)
            self.info('starting task %s with interval %.2fs', task.name,
                      interval)
            task.start()
        # ensure no tasks will be further added
        self._looping_tasks = tuple(self._looping_tasks)

    def looping_task(self, interval, func):
        """register a function to be called every `interval` seconds.

        looping tasks can only be registered during repository initialization;
        once done, this method will fail.
        """
        try:
            self._looping_tasks.append( (interval, func) )
        except AttributeError:
            raise RuntimeError("can't add looping task once the repository is started")

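    # Illustrative sketch (not part of the original source): code running at
    # server startup (e.g. a 'server_startup' hook) may register its own
    # periodic job before the looping tasks are started; the callable below is
    # hypothetical and is invoked without arguments.
    #
    #   def prune_stale_data():
    #       ...
    #
    #   repo.looping_task(60 * 30, prune_stale_data)   # every 30 minutes
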
    def threaded_task(self, func):
        """start function in a separated thread"""
        t = RepoThread(func, self._running_threads)
        t.start()

    #@locked
    def _get_pool(self):
        try:
            return self._available_pools.get(True, timeout=5)
        except Queue.Empty:
            raise Exception('no pool available after 5 secs, probably either a '
                            'bug in code (too many uncommitted/rollbacked '
                            'connections) or too much load on the server (in '
                            'which case you can try to set a bigger '
                            'connections pool size)')

    def _free_pool(self, pool):
        pool.rollback()
        self._available_pools.put_nowait(pool)

    def pinfo(self):
        # XXX: session.pool is accessed from a local storage, it would be interesting
        # to see if there is a pool set in any thread specific data
        import threading
        return '%s: %s (%s)' % (self._available_pools.qsize(),
                                ','.join(session.user.login for session in self._sessions.values()
                                         if session.pool),
                                threading.currentThread())

    def shutdown(self):
        """called on server stop event to properly close opened sessions and
        connections
        """
        self._shutting_down = True
        if isinstance(self._looping_tasks, tuple): # if tasks have been started
            for looptask in self._looping_tasks:
                self.info('canceling task %s...', looptask.name)
                looptask.cancel()
                looptask.join()
                self.info('task %s finished', looptask.name)
        for thread in self._running_threads:
            self.info('waiting thread %s...', thread.name)
            thread.join()
            self.info('thread %s finished', thread.name)
        self.hm.call_hooks('server_shutdown', repo=self)
        self.close_sessions()
        while not self._available_pools.empty():
            pool = self._available_pools.get_nowait()
            try:
                pool.close(True)
            except:
                self.exception('error while closing %s' % pool)
                continue
        if self.pyro_registered:
            pyro_unregister(self.config)
        hits, misses = self.querier.cache_hit, self.querier.cache_miss
        try:
            self.info('rql st cache hit/miss: %s/%s (%s%% hits)', hits, misses,
                      (hits * 100) / (hits + misses))
            hits, misses = self.system_source.cache_hit, self.system_source.cache_miss
            self.info('sql cache hit/miss: %s/%s (%s%% hits)', hits, misses,
                      (hits * 100) / (hits + misses))
            nocache = self.system_source.no_cache
            self.info('sql cache usage: %s/%s (%s%%)', hits + misses, nocache,
                      ((hits + misses) * 100) / (hits + misses + nocache))
        except ZeroDivisionError:
            pass

    def authenticate_user(self, session, login, password):
        """validate login / password, raise AuthenticationError on failure,
        return the associated CWUser instance on success
        """
        for source in self.sources:
            if source.support_entity('CWUser'):
                try:
                    eid = source.authenticate(session, login, password)
                    break
                except AuthenticationError:
                    continue
        else:
            raise AuthenticationError('authentication failed with all sources')
        euser = self._build_user(session, eid)
        if self.config.consider_user_state and \
               not euser.state in euser.AUTHENTICABLE_STATES:
            raise AuthenticationError('user is not in authenticable state')
        return euser

    def _build_user(self, session, eid):
        """return a CWUser entity for the user with the given eid"""
        cls = self.vreg.etype_class('CWUser')
        rql = cls.fetch_rql(session.user, ['X eid %(x)s'])
        rset = session.execute(rql, {'x': eid}, 'x')
        assert len(rset) == 1, rset
        euser = rset.get_entity(0, 0)
        # pylint: disable-msg=W0104
        # prefetch / cache euser's groups and properties. This is especially
        # useful for internal sessions to avoid security insertions
        euser.groups
        euser.properties
        return euser

    # public (dbapi) interface ################################################

    def get_schema(self):
        """return the application schema. This is a public method, not
        requiring a session id
        """
        try:
            # necessary to support pickling used by pyro
            self.schema.__hashmode__ = 'pickle'
            return self.schema
        finally:
            self.schema.__hashmode__ = None

    def get_cubes(self):
        """return the list of cubes used by this application. This is a
        public method, not requiring a session id.
        """
        versions = self.get_versions(not self.config.creating)
        cubes = list(versions)
        cubes.remove('cubicweb')
        return cubes

    @cached
    def get_versions(self, checkversions=False):
        """return a dictionary containing the cubes used by this application
        as keys with their version as value, including the cubicweb version.
        This is a public method, not requiring a session id.
        """
        from logilab.common.changelog import Version
        vcconf = {}
        session = self.internal_session()
        try:
            for pk, version in session.execute(
                'Any K,V WHERE P is CWProperty, P value V, P pkey K, '
                'P pkey ~="system.version.%"', build_descr=False):
                cube = pk.split('.')[-1]
                # XXX cubicweb migration
                if cube in CW_MIGRATION_MAP:
                    cube = CW_MIGRATION_MAP[cube]
                version = Version(version)
                vcconf[cube] = version
                if checkversions:
                    if cube != 'cubicweb':
                        fsversion = self.config.cube_version(cube)
                    else:
                        fsversion = self.config.cubicweb_version()
                    if version < fsversion:
                        msg = ('application has %s version %s but %s '
                               'is installed. Run "cubicweb-ctl upgrade".')
                        raise ExecutionError(msg % (cube, version, fsversion))
        finally:
            session.close()
        return vcconf

    @cached
    def source_defs(self):
        sources = self.config.sources().copy()
        # remove manager information
        sources.pop('admin', None)
        # remove sensitive information
        for uri, sourcedef in sources.iteritems():
            sourcedef = sourcedef.copy()
            self.sources_by_uri[uri].remove_sensitive_information(sourcedef)
            sources[uri] = sourcedef
        return sources

    def properties(self):
        """return a result set containing system wide properties"""
        session = self.internal_session()
        try:
            return session.execute('Any K,V WHERE P is CWProperty,'
                                   'P pkey K, P value V, NOT P for_user U',
                                   build_descr=False)
        finally:
            session.close()

    def register_user(self, login, password, email=None, **kwargs):
        """check that a user with the given login exists, if not create it
        with the given password. This method is designed to be used for
        anonymous registration on a public web site.
        """
        # XXX should not be called from the web interface
        session = self.internal_session()
        # for consistency, keep same error as unique check hook (although not required)
        errmsg = session._('the value "%s" is already used, use another one')
        try:
            if (session.execute('CWUser X WHERE X login %(login)s', {'login': login})
                or session.execute('CWUser X WHERE X use_email C, C address %(login)s',
                                   {'login': login})):
                raise ValidationError(None, {'login': errmsg % login})
            # we have to create the user
            user = self.vreg.etype_class('CWUser')(session, None)
            if isinstance(password, unicode):
                # password should *always* be utf8 encoded
                password = password.encode('UTF8')
            kwargs['login'] = login
            kwargs['upassword'] = password
            user.update(kwargs)
            self.glob_add_entity(session, user)
            session.execute('SET X in_group G WHERE X eid %(x)s, G name "users"',
                            {'x': user.eid})
            if email or '@' in login:
                d = {'login': login, 'email': email or login}
                if session.execute('EmailAddress X WHERE X address %(email)s', d):
                    raise ValidationError(None, {'address': errmsg % d['email']})
                session.execute('INSERT EmailAddress X: X address %(email)s, '
                                'U primary_email X, U use_email X WHERE U login %(login)s', d)
            session.commit()
        finally:
            session.close()
        return True

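    # Illustrative sketch (not part of the original source): registering a new
    # account from application code; the login, password and email values are
    # hypothetical, extra keyword arguments become CWUser attributes.
    #
    #   repo.register_user(u'jdoe', u'secret', email=u'jdoe@example.org')
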
    def connect(self, login, password, cnxprops=None):
        """open a connection for a given user

        base_url may be needed to send mails
        cnxtype indicates if this is a pyro connection or an in-memory connection

        raise `AuthenticationError` if the authentication failed
        raise `ConnectionError` if we can't open a connection
        """
        # use an internal connection
        session = self.internal_session()
        # try to get a user object
        try:
            user = self.authenticate_user(session, login, password)
        finally:
            session.close()
        session = Session(user, self, cnxprops)
        user.req = user.rset.req = session
        user.clear_related_cache()
        self._sessions[session.id] = session
        self.info('opened %s', session)
        self.hm.call_hooks('session_open', session=session)
        # commit session at this point in case write operations have been done
        # during `session_open` hooks
        session.commit()
        return session.id

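    # Illustrative sketch (not part of the original source) of the dbapi-like
    # flow built on the public methods of this class: connect() returns a
    # session id which is then passed to execute(), commit() and close().
    # Credentials and query are hypothetical.
    #
    #   sessionid = repo.connect(u'admin', u'admin-password')
    #   rset = repo.execute(sessionid, 'Any X WHERE X is CWUser')
    #   repo.commit(sessionid)
    #   repo.close(sessionid)
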
    def execute(self, sessionid, rqlstring, args=None, eid_key=None, build_descr=True):
        """execute a RQL query

        * rqlstring should be a unicode string or a plain ascii string
        * args are the optional parameters used in the query
        * build_descr is a flag indicating if the description should be
          built on select queries
        """
        session = self._get_session(sessionid, setpool=True)
        try:
            try:
                return self.querier.execute(session, rqlstring, args, eid_key,
                                            build_descr)
            except (Unauthorized, RQLSyntaxError):
                raise
            except ValidationError, ex:
                # need ValidationError normalization here so the error may pass
                # through pyro
                if hasattr(ex.entity, 'eid'):
                    ex.entity = ex.entity.eid # error raised by yams
                    args = list(ex.args)
                    args[0] = ex.entity
                    ex.args = tuple(args)
                raise
            except:
                # FIXME: check error to catch internal errors
                self.exception('unexpected error')
                raise
        finally:
            session.reset_pool()

    def describe(self, sessionid, eid):
        """return a tuple (type, source, extid) for the entity with id <eid>"""
        session = self._get_session(sessionid, setpool=True)
        try:
            return self.type_and_source_from_eid(eid, session)
        finally:
            session.reset_pool()

    def check_session(self, sessionid):
        """raise `BadSessionId` if the connection is no longer valid"""
        self._get_session(sessionid, setpool=False)

    def get_shared_data(self, sessionid, key, default=None, pop=False):
        """return the session's data dictionary"""
        session = self._get_session(sessionid, setpool=False)
        return session.get_shared_data(key, default, pop)

    def set_shared_data(self, sessionid, key, value, querydata=False):
        """set value associated to `key` in shared data

        if `querydata` is true, the value will be added to the repository
        session's query data which are cleared on commit/rollback of the current
        transaction, and won't be available through the connection, only on the
        repository side.
        """
        session = self._get_session(sessionid, setpool=False)
        session.set_shared_data(key, value, querydata)

    def commit(self, sessionid):
        """commit transaction for the session with the given id"""
        self.debug('begin commit for session %s', sessionid)
        try:
            self._get_session(sessionid).commit()
        except (ValidationError, Unauthorized):
            raise
        except:
            self.exception('unexpected error')
            raise

    def rollback(self, sessionid):
        """rollback transaction for the session with the given id"""
        self.debug('begin rollback for session %s', sessionid)
        try:
            self._get_session(sessionid).rollback()
        except:
            self.exception('unexpected error')
            raise

    def close(self, sessionid, checkshuttingdown=True):
        """close the session with the given id"""
        session = self._get_session(sessionid, setpool=True,
                                    checkshuttingdown=checkshuttingdown)
        # operations uncommitted before close are rollbacked before the hook is called
        session.rollback()
        self.hm.call_hooks('session_close', session=session)
        # commit session at this point in case write operations have been done
        # during `session_close` hooks
        session.commit()
        session.close()
        del self._sessions[sessionid]
        self.info('closed session %s for user %s', sessionid, session.user.login)

    def user_info(self, sessionid, props=None):
        """this method should be used by the client to:
        * check session id validity
        * update user information on each user's request (i.e. groups and
          custom properties)
        """
        session = self._get_session(sessionid, setpool=False)
        if props:
            # update session properties
            for prop, value in props.items():
                session.change_property(prop, value)
        user = session.user
        return user.eid, user.login, user.groups, user.properties

    # public (inter-repository) interface #####################################

    def entities_modified_since(self, etypes, mtime):
        """function designed to be called from an external repository which
        is using this one as a rql source for synchronization, returning a
        3-tuple containing:
        * the local date
        * list of (etype, eid) of entities of the given types which have been
          modified since the given timestamp (actually entities whose full text
          index content has changed)
        * list of (etype, eid) of entities of the given types which have been
          deleted since the given timestamp
        """
        session = self.internal_session()
        updatetime = datetime.now()
        try:
            modentities, delentities = self.system_source.modified_entities(
                session, etypes, mtime)
            return updatetime, modentities, delentities
        finally:
            session.close()

    # session handling ########################################################

    def close_sessions(self):
        """close every opened session"""
        for sessionid in self._sessions.keys():
            try:
                self.close(sessionid, checkshuttingdown=False)
            except:
                self.exception('error while closing session %s' % sessionid)

    def clean_sessions(self):
        """close sessions not used since an amount of time specified in the
        configuration
        """
        mintime = time() - self.config['session-time']
        self.debug('cleaning sessions unused since %s',
                   strftime('%T', localtime(mintime)))
        nbclosed = 0
        for session in self._sessions.values():
            if session.timestamp < mintime:
                self.close(session.id)
                nbclosed += 1
        return nbclosed

    def internal_session(self, cnxprops=None):
        """return a dbapi like connection/cursor using the internal user which
        has every right on the repository. You'll *have to* commit/rollback
        or close (rollback implicitly) the session once the job's done, else
        you'll leak connections pools until no more pool is available, causing
        an irremediable freeze...
        """
        session = InternalSession(self, cnxprops)
        session.set_pool()
        return session

|
1939
67e7379edd96
#343379: disturbing message on upgrade
Sylvain Thénault <sylvain.thenault@logilab.fr>
parents:
1883
diff
changeset
|
725 |
def _get_session(self, sessionid, setpool=False, checkshuttingdown=True): |
0 | 726 |
"""return the user associated to the given session identifier""" |
1939
67e7379edd96
#343379: disturbing message on upgrade
Sylvain Thénault <sylvain.thenault@logilab.fr>
parents:
1883
diff
changeset
|
727 |
if checkshuttingdown and self._shutting_down: |
1883
011e13d74cfc
shuting -> shutting
Aurelien Campeas <aurelien.campeas@logilab.fr>
parents:
1880
diff
changeset
|
728 |
raise Exception('Repository is shutting down') |
0 | 729 |
try: |
730 |
session = self._sessions[sessionid] |
|
731 |
except KeyError: |
|
732 |
raise BadConnectionId('No such session %s' % sessionid) |
|
733 |
if setpool: |
|
734 |
session.set_pool() |
|
735 |
return session |
|
736 |
||
737 |
# data sources handling ################################################### |
|
738 |
# * correspondance between eid and (type, source) |
|
739 |
# * correspondance between eid and local id (i.e. specific to a given source) |
|
740 |
# * searchable text indexes |
|
1482 | 741 |
|
    def type_and_source_from_eid(self, eid, session=None):
        """return a tuple (type, source, extid) for the entity with id <eid>"""
        try:
            eid = typed_eid(eid)
        except ValueError:
            raise UnknownEid(eid)
        try:
            return self._type_source_cache[eid]
        except KeyError:
            if session is None:
                session = self.internal_session()
                reset_pool = True
            else:
                reset_pool = False
            try:
                etype, uri, extid = self.system_source.eid_type_source(session,
                                                                       eid)
            finally:
                if reset_pool:
                    session.reset_pool()
        self._type_source_cache[eid] = (etype, uri, extid)
        if uri != 'system':
            self._extid_cache[(extid, uri)] = eid
        return etype, uri, extid

    def clear_caches(self, eids):
        etcache = self._type_source_cache
        extidcache = self._extid_cache
        rqlcache = self.querier._rql_cache
        for eid in eids:
            try:
                etype, uri, extid = etcache.pop(typed_eid(eid)) # may be a string in some cases
                rqlcache.pop('%s X WHERE X eid %s' % (etype, eid), None)
                extidcache.pop((extid, uri), None)
            except KeyError:
                etype = None
            rqlcache.pop('Any X WHERE X eid %s' % eid, None)
            for source in self.sources:
                source.clear_eid_cache(eid, etype)

    def type_from_eid(self, eid, session=None):
        """return the type of the entity with id <eid>"""
        return self.type_and_source_from_eid(eid, session)[0]

    def source_from_eid(self, eid, session=None):
        """return the source for the given entity's eid"""
        return self.sources_by_uri[self.type_and_source_from_eid(eid, session)[1]]

    def eid2extid(self, source, eid, session=None):
        """get local id from an eid"""
        etype, uri, extid = self.type_and_source_from_eid(eid, session)
        if source.uri != uri:
            # eid not from the given source
            raise UnknownEid(eid)
        return extid

def extid2eid(self, source, extid, etype, session=None, insert=True, |
1250
5c20a7f13c84
new recreate argument to extid2eid when an external source want to recreate entities previously imported with a predictable ext id
sylvain.thenault@logilab.fr
parents:
1228
diff
changeset
|
799 |
recreate=False): |
0 | 800 |
"""get eid from a local id. An eid is attributed if no record is found""" |
1954 | 801 |
cachekey = (extid, source.uri) |
0 | 802 |
try: |
803 |
return self._extid_cache[cachekey] |
|
804 |
except KeyError: |
|
805 |
pass |
|
806 |
reset_pool = False |
|
807 |
if session is None: |
|
808 |
session = self.internal_session() |
|
809 |
reset_pool = True |
|
1954 | 810 |
eid = self.system_source.extid2eid(session, source, extid) |
0 | 811 |
if eid is not None: |
812 |
self._extid_cache[cachekey] = eid |
|
1954 | 813 |
self._type_source_cache[eid] = (etype, source.uri, extid) |
1250
5c20a7f13c84
new recreate argument to extid2eid when an external source want to recreate entities previously imported with a predictable ext id
sylvain.thenault@logilab.fr
parents:
1228
diff
changeset
|
814 |
if recreate: |
1954 | 815 |
entity = source.before_entity_insertion(session, extid, etype, eid) |
1250
5c20a7f13c84
new recreate argument to extid2eid when an external source want to recreate entities previously imported with a predictable ext id
sylvain.thenault@logilab.fr
parents:
1228
diff
changeset
|
816 |
entity._cw_recreating = True |
5c20a7f13c84
new recreate argument to extid2eid when an external source want to recreate entities previously imported with a predictable ext id
sylvain.thenault@logilab.fr
parents:
1228
diff
changeset
|
817 |
if source.should_call_hooks: |
5c20a7f13c84
new recreate argument to extid2eid when an external source want to recreate entities previously imported with a predictable ext id
sylvain.thenault@logilab.fr
parents:
1228
diff
changeset
|
818 |
self.hm.call_hooks('before_add_entity', etype, session, entity) |
5c20a7f13c84
new recreate argument to extid2eid when an external source want to recreate entities previously imported with a predictable ext id
sylvain.thenault@logilab.fr
parents:
1228
diff
changeset
|
819 |
# XXX add fti op ? |
1954 | 820 |
source.after_entity_insertion(session, extid, entity) |
1250
5c20a7f13c84
new recreate argument to extid2eid when an external source want to recreate entities previously imported with a predictable ext id
sylvain.thenault@logilab.fr
parents:
1228
diff
changeset
|
821 |
if source.should_call_hooks: |
5c20a7f13c84
new recreate argument to extid2eid when an external source want to recreate entities previously imported with a predictable ext id
sylvain.thenault@logilab.fr
parents:
1228
diff
changeset
|
822 |
self.hm.call_hooks('after_add_entity', etype, session, entity) |
0 | 823 |
if reset_pool: |
824 |
session.reset_pool() |
|
825 |
return eid |
|
826 |
if not insert: |
|
827 |
return |
|
1954 | 828 |
# no link between extid and eid, create one using an internal session |
0 | 829 |
# since the current session user may not have required permissions to |
830 |
# do necessary stuff and we don't want to commit user session. |
|
831 |
# |
|
450
5e14ea0e81c8
a note for later
Aurelien Campeas <aurelien.campeas@logilab.fr>
parents:
341
diff
changeset
|
832 |
# Moreover, even if session is already an internal session but is |
0 | 833 |
# processing a commit, we have to use another one |
834 |
if not session.is_internal_session: |
|
835 |
session = self.internal_session() |
|
836 |
reset_pool = True |
|
837 |
try: |
|
838 |
eid = self.system_source.create_eid(session) |
|
839 |
self._extid_cache[cachekey] = eid |
|
1954 | 840 |
self._type_source_cache[eid] = (etype, source.uri, extid) |
841 |
entity = source.before_entity_insertion(session, extid, etype, eid) |
|
0 | 842 |
if source.should_call_hooks: |
843 |
self.hm.call_hooks('before_add_entity', etype, session, entity) |
|
450
5e14ea0e81c8
a note for later
Aurelien Campeas <aurelien.campeas@logilab.fr>
parents:
341
diff
changeset
|
844 |
# XXX call add_info with complete=False ? |
1954 | 845 |
self.add_info(session, entity, source, extid) |
846 |
source.after_entity_insertion(session, extid, entity) |
|
0 | 847 |
if source.should_call_hooks: |
848 |
self.hm.call_hooks('after_add_entity', etype, session, entity) |
|
849 |
else: |
|
850 |
# minimal meta-data |
|
851 |
session.execute('SET X is E WHERE X eid %(x)s, E name %(name)s', |
|
852 |
{'x': entity.eid, 'name': entity.id}, 'x') |
|
853 |
session.commit(reset_pool) |
|
854 |
return eid |
|
855 |
except: |
|
856 |
session.rollback(reset_pool) |
|
857 |
raise |
|
1482 | 858 |
|
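    # Illustrative sketch (not part of the original source): an external source
    # typically maps each record it imports to a cubicweb eid through this
    # method, and a new eid is allocated on the first call for a given external
    # id. The source instance and external id below are hypothetical.
    #
    #   eid = repo.extid2eid(mysource, 'uid=jdoe,ou=people', 'CWUser', session)
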
    def add_info(self, session, entity, source, extid=None, complete=True):
        """add type and source info for an eid into the system table,
        and index the entity with the full text index
        """
        # begin by inserting eid/type/source/extid into the entities table
        self.system_source.add_info(session, entity, source, extid)
        if complete:
            entity.complete(entity.e_schema.indexable_attributes())
        session.add_query_data('neweids', entity.eid)
        # now we can update the full text index
        if self.do_fti:
            FTIndexEntityOp(session, entity=entity)
        CleanupEidTypeCacheOp(session)

    def delete_info(self, session, eid):
        self._prepare_delete_info(session, eid)
        self._delete_info(session, eid)

    def _prepare_delete_info(self, session, eid):
        """prepare the repository for deletion of an entity:
        * update the fti
        * mark eid as being deleted in session info
        * setup cache update operation
        """
        self.system_source.fti_unindex_entity(session, eid)
        pending = session.query_data('pendingeids', set(), setdefault=True)
        pending.add(eid)
        CleanupEidTypeCacheOp(session)

    def _delete_info(self, session, eid):
        """delete system information on deletion of an entity:
        * delete all relations on this entity
        * transfer record from the entities table to the deleted_entities table
        """
        etype, uri, extid = self.type_and_source_from_eid(eid, session)
        self._clear_eid_relations(session, etype, eid)
        self.system_source.delete_info(session, eid, etype, uri, extid)

    def _clear_eid_relations(self, session, etype, eid):
        """when an entity is deleted, build and execute an rql query to delete
        all its relations
        """
        rql = []
        eschema = self.schema.eschema(etype)
        for rschema, targetschemas, x in eschema.relation_definitions():
            rtype = rschema.type
            if rtype == 'identity':
                continue
            var = '%s%s' % (rtype.upper(), x.upper())
            if x == 'subject':
                # don't skip inlined relations so they are regularly
                # deleted and so hooks are correctly called
                rql.append('X %s %s' % (rtype, var))
            else:
                rql.append('%s %s X' % (var, rtype))
        rql = 'DELETE %s WHERE X eid %%(x)s' % ','.join(rql)
        # unsafe_execute since we suppose that if the user can delete the entity,
        # he can delete all its relations without security checking
        session.unsafe_execute(rql, {'x': eid}, 'x', build_descr=False)

    def index_entity(self, session, entity):
        """full text index a modified entity"""
        alreadydone = session.query_data('indexedeids', set(), setdefault=True)
        if entity.eid in alreadydone:
            self.info('skipping reindexation of %s, already done', entity.eid)
            return
        alreadydone.add(entity.eid)
        self.system_source.fti_index_entity(session, entity)

    def locate_relation_source(self, session, subject, rtype, object):
        subjsource = self.source_from_eid(subject, session)
        objsource = self.source_from_eid(object, session)
        if not (subjsource is objsource and subjsource.support_relation(rtype, 1)):
            source = self.system_source
            if not source.support_relation(rtype, 1):
                raise RTypeNotSupportedBySources(rtype)
        else:
            source = subjsource
        return source

    def locate_etype_source(self, etype):
        for source in self.sources:
            if source.support_entity(etype, 1):
                return source
        else:
            raise ETypeNotSupportedBySources(etype)

    def glob_add_entity(self, session, entity):
        """add an entity to the repository

        the entity eid should originally be None and a unique eid is assigned
        to the entity instance
        """
        entity = entity.pre_add_hook()
        eschema = entity.e_schema
        etype = str(eschema)
        source = self.locate_etype_source(etype)
        # attribute an eid to the entity before calling hooks
        entity.set_eid(self.system_source.create_eid(session))
        entity._is_saved = False # entity has an eid but is not yet saved
        relations = []
        # if inlined relations are specified, fill entity's related cache to
        # avoid unnecessary queries
        for attr in entity.keys():
            rschema = eschema.subject_relation(attr)
            if not rschema.is_final(): # inlined relation
                entity.set_related_cache(attr, 'subject',
                                         entity.req.eid_rset(entity[attr]))
                relations.append((attr, entity[attr]))
        if source.should_call_hooks:
            self.hm.call_hooks('before_add_entity', etype, session, entity)
        entity.set_defaults()
        entity.check(creation=True)
        source.add_entity(session, entity)
        if source.uri != 'system':
            extid = source.get_extid(entity)
            self._extid_cache[(str(extid), source.uri)] = entity.eid
        else:
            extid = None
        self.add_info(session, entity, source, extid, complete=False)
        entity._is_saved = True # entity has an eid and is saved
        #print 'added', entity#, entity.items()
        # trigger after_add_entity after after_add_relation
        if source.should_call_hooks:
            self.hm.call_hooks('after_add_entity', etype, session, entity)
            # call hooks for inlined relations
            for attr, value in relations:
                self.hm.call_hooks('before_add_relation', attr, session,
                                   entity.eid, attr, value)
                self.hm.call_hooks('after_add_relation', attr, session,
                                   entity.eid, attr, value)
        return entity.eid

    def glob_update_entity(self, session, entity):
        """replace an entity in the repository
        the type and the eid of an entity must not be changed
        """
        #print 'update', entity
        entity.check()
        etype = str(entity.e_schema)
        eschema = entity.e_schema
        only_inline_rels, need_fti_update = True, False
        relations = []
        for attr in entity.keys():
            if attr == 'eid':
                continue
            rschema = eschema.subject_relation(attr)
            if rschema.is_final():
                if eschema.rproperty(attr, 'fulltextindexed'):
                    need_fti_update = True
                only_inline_rels = False
            else:
                # inlined relation
                previous_value = entity.related(attr)
                if previous_value:
                    previous_value = previous_value[0][0] # got a result set
                    self.hm.call_hooks('before_delete_relation', attr, session,
                                       entity.eid, attr, previous_value)
                entity.set_related_cache(attr, 'subject',
                                         entity.req.eid_rset(entity[attr]))
                relations.append((attr, entity[attr], previous_value))
        source = self.source_from_eid(entity.eid, session)
        if source.should_call_hooks:
            # call hooks for inlined relations
            for attr, value, _ in relations:
                self.hm.call_hooks('before_add_relation', attr, session,
                                   entity.eid, attr, value)
            if not only_inline_rels:
                self.hm.call_hooks('before_update_entity', etype, session,
                                   entity)
        source.update_entity(session, entity)
        if not only_inline_rels:
            if need_fti_update and self.do_fti:
                # reindex the entity only if this query is updating at least
                # one indexable attribute
                FTIndexEntityOp(session, entity=entity)
            if source.should_call_hooks:
                self.hm.call_hooks('after_update_entity', etype, session,
                                   entity)
        if source.should_call_hooks:
            for attr, value, prevvalue in relations:
                if prevvalue:
                    self.hm.call_hooks('after_delete_relation', attr, session,
                                       entity.eid, attr, prevvalue)
                del_existing_rel_if_needed(session, entity.eid, attr, value)
                self.hm.call_hooks('after_add_relation', attr, session,
                                   entity.eid, attr, value)

    def glob_delete_entity(self, session, eid):
        """delete an entity and all related entities from the repository"""
        #print 'deleting', eid
        # call delete_info before hooks
        self._prepare_delete_info(session, eid)
        etype, uri, extid = self.type_and_source_from_eid(eid, session)
        source = self.sources_by_uri[uri]
        if source.should_call_hooks:
            self.hm.call_hooks('before_delete_entity', etype, session, eid)
        self._delete_info(session, eid)
        source.delete_entity(session, etype, eid)
        if source.should_call_hooks:
            self.hm.call_hooks('after_delete_entity', etype, session, eid)
        # don't clear cache here, this is done in a hook on commit

    def glob_add_relation(self, session, subject, rtype, object):
        """add a relation to the repository"""
        assert subject is not None
        assert rtype
        assert object is not None
        source = self.locate_relation_source(session, subject, rtype, object)
        #print 'adding', subject, rtype, object, 'to', source
        if source.should_call_hooks:
            del_existing_rel_if_needed(session, subject, rtype, object)
            self.hm.call_hooks('before_add_relation', rtype, session,
                               subject, rtype, object)
        source.add_relation(session, subject, rtype, object)
        if source.should_call_hooks:
            self.hm.call_hooks('after_add_relation', rtype, session,
                               subject, rtype, object)

    def glob_delete_relation(self, session, subject, rtype, object):
        """delete a relation from the repository"""
        assert subject is not None
        assert rtype
        assert object is not None
        source = self.locate_relation_source(session, subject, rtype, object)
        #print 'delete rel', subject, rtype, object
        if source.should_call_hooks:
            self.hm.call_hooks('before_delete_relation', rtype, session,
                               subject, rtype, object)
        source.delete_relation(session, subject, rtype, object)
        if self.schema.rschema(rtype).symetric:
            # on symetric relations, we can't know in which sense it's
            # stored so try to delete both
            source.delete_relation(session, object, rtype, subject)
        if source.should_call_hooks:
            self.hm.call_hooks('after_delete_relation', rtype, session,
                               subject, rtype, object)


    # pyro handling ###########################################################

    def pyro_register(self, host=''):
        """register the repository as a pyro object"""
        from Pyro import core
        port = self.config['pyro-port']
        nshost, nsgroup = self.config['pyro-ns-host'], self.config['pyro-ns-group']
        nsgroup = ':' + nsgroup
        core.initServer(banner=0)
        daemon = core.Daemon(host=host, port=port)
        daemon.useNameServer(self.pyro_nameserver(nshost, nsgroup))
        # use Delegation approach
        impl = core.ObjBase()
        impl.delegateTo(self)
        nsid = self.config['pyro-id'] or self.config.appid
        daemon.connect(impl, '%s.%s' % (nsgroup, nsid))
        msg = 'repository registered as a pyro object using group %s and id %s'
        self.info(msg, nsgroup, nsid)
        self.pyro_registered = True
        return daemon

    def pyro_nameserver(self, host=None, group=None):
        """locate and bind the name server to the daemon"""
        from Pyro import naming, errors
        # locate the name server
        nameserver = naming.NameServerLocator().getNS(host)
        if group is not None:
            # make sure our namespace group exists
            try:
                nameserver.createGroup(group)
            except errors.NamingError:
                pass
        return nameserver

    # multi-sources planner helpers ###########################################

    @cached
    def rel_type_sources(self, rtype):
        return [source for source in self.sources
                if source.support_relation(rtype)
                or rtype in source.dont_cross_relations]

    @cached
    def can_cross_relation(self, rtype):
        return [source for source in self.sources
                if source.support_relation(rtype)
                and rtype in source.cross_relations]

    @cached
    def is_multi_sources_relation(self, rtype):
        return any(source for source in self.sources
                   if not source is self.system_source
                   and source.support_relation(rtype))


def pyro_unregister(config):
    """unregister the repository from the pyro name server"""
    nshost, nsgroup = config['pyro-ns-host'], config['pyro-ns-group']
    appid = config['pyro-id'] or config.appid
    from Pyro import core, naming, errors
    core.initClient(banner=False)
    try:
        nameserver = naming.NameServerLocator().getNS(nshost)
    except errors.PyroError, ex:
        # name server not responding
        config.error('can\'t locate pyro name server: %s', ex)
        return
    try:
        nameserver.unregister(':%s.%s' % (nsgroup, appid))
        config.info('%s unregistered from pyro name server', appid)
    except errors.NamingError:
        config.warning('%s already unregistered from pyro name server', appid)


from logging import getLogger
from cubicweb import set_log_methods
set_log_methods(Repository, getLogger('cubicweb.repository'))