author | Aurelien Campeas <aurelien.campeas@logilab.fr> |
Mon, 22 Jun 2009 11:53:59 +0200 | |
branch | stable |
changeset 2131 | 00e6d1cb18ea |
parent 2101 | 08003e0354a7 |
child 2153 | d42d1eaefcdd |
permissions | -rw-r--r-- |
0 | 1 |
"""Defines the central class for the CubicWeb RQL server: the repository. |
2 |
||
3 |
The repository is an abstraction allowing execution of rql queries against |
|
4 |
data sources. Most of the work is actually done in helper classes. The |
|
5 |
repository mainly: |
|
6 |
||
7 |
* brings these classes all together to provide a single access |
|
8 |
point to a cubicweb application. |
|
9 |
* handles session management |
|
10 |
* provides method for pyro registration, to call if pyro is enabled |
|
11 |
||
12 |
||
13 |
:organization: Logilab |
|
1977
606923dff11b
big bunch of copyright / docstring update
Adrien Di Mascio <Adrien.DiMascio@logilab.fr>
parents:
1954
diff
changeset
|
14 |
:copyright: 2001-2009 LOGILAB S.A. (Paris, FRANCE), license is LGPL v2. |
0 | 15 |
:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr |
1977
606923dff11b
big bunch of copyright / docstring update
Adrien Di Mascio <Adrien.DiMascio@logilab.fr>
parents:
1954
diff
changeset
|
16 |
:license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses |
0 | 17 |
""" |
18 |
__docformat__ = "restructuredtext en" |
|
19 |
||
20 |
import sys |
|
21 |
import Queue |
|
22 |
from os.path import join, exists |
|
1016
26387b836099
use datetime instead of mx.DateTime
sylvain.thenault@logilab.fr
parents:
636
diff
changeset
|
23 |
from datetime import datetime |
0 | 24 |
from time import time, localtime, strftime |
25 |
||
26 |
from logilab.common.decorators import cached |
|
27 |
||
28 |
from yams import BadSchemaDefinition |
|
29 |
from rql import RQLSyntaxError |
|
30 |
||
31 |
from cubicweb import (CW_SOFTWARE_ROOT, UnknownEid, AuthenticationError, |
|
32 |
ETypeNotSupportedBySources, RTypeNotSupportedBySources, |
|
33 |
BadConnectionId, Unauthorized, ValidationError, |
|
34 |
ExecutionError, typed_eid, |
|
35 |
CW_MIGRATION_MAP) |
|
36 |
from cubicweb.cwvreg import CubicWebRegistry |
|
37 |
from cubicweb.schema import CubicWebSchema |
|
38 |
||
39 |
from cubicweb.server.utils import RepoThread, LoopTask |
|
40 |
from cubicweb.server.pool import ConnectionsPool, LateOperation, SingleLastOperation |
|
41 |
from cubicweb.server.session import Session, InternalSession |
|
42 |
from cubicweb.server.querier import QuerierHelper |
|
43 |
from cubicweb.server.sources import get_source |
|
44 |
from cubicweb.server.hooksmanager import HooksManager |
|
45 |
from cubicweb.server.hookhelper import rproperty |
|
46 |
||
47 |
||
48 |
class CleanupEidTypeCacheOp(SingleLastOperation):
    """on rollback of a insert query or commit of delete query, we have to
    clear repository's cache from no more valid entries

    NOTE: querier's rqlst/solutions cache may have been polluted too with
    queries such as Any X WHERE X eid 32 if 32 has been rollbacked however
    generated queries are unpredictable and analysing all the cache probably
    too expensive. Notice that there is no pb when using args to specify eids
    instead of giving them into the rql string.
    """

    def commit_event(self):
        """the observed connections pool has been committed,
        remove deleted eids from repository type/source cache
        """
        # 'pendingeids' holds eids of entities deleted in this transaction;
        # absent key simply means nothing was deleted
        try:
            self.repo.clear_caches(self.session.transaction_data['pendingeids'])
        except KeyError:
            pass

    def rollback_event(self):
        """the observed connections pool has been rollbacked,
        remove inserted eids from repository type/source cache
        """
        # 'neweids' holds eids of entities created in this transaction;
        # absent key simply means nothing was created
        try:
            self.repo.clear_caches(self.session.transaction_data['neweids'])
        except KeyError:
            pass
0 | 76 |
|
77 |
||
78 |
class FTIndexEntityOp(LateOperation):
    """operation to delay entity full text indexation to commit

    since fti indexing may trigger discovery of other entities, it should be
    triggered on precommit, not commit, and this should be done after other
    precommit operation which may add relations to the entity
    """

    def precommit_event(self):
        session = self.session
        entity = self.entity
        pending = session.transaction_data.get('pendingeids', ())
        if entity.eid in pending:
            # entity added and deleted in the same transaction
            return
        # drop the stale index entry, then reindex every fti container
        session.repo.system_source.fti_unindex_entity(session, entity.eid)
        for container in entity.fti_containers():
            session.repo.index_entity(session, container)

    def commit_event(self):
        # all the work happens at precommit time
        pass
|
97 |
||
98 |
def del_existing_rel_if_needed(session, eidfrom, rtype, eidto):
    """delete existing relation when adding a new one if card is 1 or ?

    have to be done once the new relation has been inserted to avoid having
    an entity without a relation for some time

    this kind of behaviour has to be done in the repository so we don't have
    hooks order hazardness
    """
    # skip delete queries (only?) if session is an internal session. This is
    # hooks responsability to ensure they do not violate relation's cardinality
    if session.is_super_session:
        return
    card = rproperty(session, rtype, eidfrom, eidto, 'cardinality')
    subjcard, objcard = card[0], card[1]
    # one may be tented to check for neweids but this may cause more than one
    # relation even with '1?' cardinality if thoses relations are added in the
    # same transaction where the entity is being created. This never occurs from
    # the web interface but may occurs during test or dbapi connection (though
    # not expected for this). So: don't do it, we pretend to ensure repository
    # consistency.
    # XXX should probably not use unsafe_execute!
    if subjcard in '1?':
        rschema = session.repo.schema.rschema(rtype)
        # inlined relations are stored as an attribute of the subject, hence
        # are overwritten in place and need no explicit delete
        if not rschema.inlined:
            session.unsafe_execute(
                'DELETE X %s Y WHERE X eid %%(x)s, NOT Y eid %%(y)s' % rtype,
                {'x': eidfrom, 'y': eidto}, 'x')
    if objcard in '1?':
        session.unsafe_execute(
            'DELETE X %s Y WHERE NOT X eid %%(x)s, Y eid %%(y)s' % rtype,
            {'x': eidfrom, 'y': eidto}, 'y')
|
0 | 129 |
|
1482 | 130 |
|
0 | 131 |
class Repository(object): |
132 |
"""a repository provides access to a set of persistent storages for |
|
133 |
entities and relations |
|
134 |
||
135 |
XXX protect pyro access |
|
136 |
""" |
|
1482 | 137 |
|
0 | 138 |
def __init__(self, config, vreg=None, debug=False):
    """initialize the repository from `config` (a server configuration
    object): build the schema, the sources, the hooks manager and the
    connections pools, then run startup hooks and background tasks
    """
    self.config = config
    if vreg is None:
        vreg = CubicWebRegistry(config, debug)
    self.vreg = vreg
    self.pyro_registered = False
    self.info('starting repository from %s', self.config.apphome)
    # dictionary of opened sessions
    self._sessions = {}
    # list of functions to be called at regular interval
    self._looping_tasks = []
    # list of running threads
    self._running_threads = []
    # initial schema, should be build or replaced latter
    self.schema = CubicWebSchema(config.appid)
    # querier helper, need to be created after sources initialization
    self.querier = QuerierHelper(self, self.schema)
    # should we reindex in changes?
    self.do_fti = not config['delay-full-text-indexation']
    # sources
    self.sources = []
    self.sources_by_uri = {}
    # FIXME: store additional sources info in the system database ?
    # FIXME: sources should be ordered (add_entity priority)
    for uri, source_config in config.sources().items():
        if uri == 'admin':
            # not an actual source
            continue
        source = self.get_source(uri, source_config)
        self.sources_by_uri[uri] = source
        self.sources.append(source)
    self.system_source = self.sources_by_uri['system']
    # ensure system source is the first one
    self.sources.remove(self.system_source)
    self.sources.insert(0, self.system_source)
    # cache eid -> type / source
    self._type_source_cache = {}
    # cache (extid, source uri) -> eid
    self._extid_cache = {}
    # create the hooks manager
    self.hm = HooksManager(self.schema)
    # open some connections pools; a single pool is enough for the
    # initialization phase, real pools are (re)opened at the end
    self._available_pools = Queue.Queue()
    self._available_pools.put_nowait(ConnectionsPool(self.sources))
    if config.read_application_schema:
        # normal start: load the application schema from the database
        self.fill_schema()
    elif config.bootstrap_schema:
        # usually during repository creation
        self.warning("set fs application'schema as bootstrap schema")
        config.bootstrap_cubes()
        self.set_bootstrap_schema(self.config.load_schema())
        # need to load the Any and CWUser entity types
        self.vreg.schema = self.schema
        etdirectory = join(CW_SOFTWARE_ROOT, 'entities')
        self.vreg.init_registration([etdirectory])
        self.vreg.load_file(join(etdirectory, '__init__.py'),
                            'cubicweb.entities.__init__')
        self.vreg.load_file(join(etdirectory, 'authobjs.py'),
                            'cubicweb.entities.authobjs')
    else:
        # test start: use the file system schema (quicker)
        self.warning("set fs application'schema")
        config.bootstrap_cubes()
        self.set_schema(self.config.load_schema())
    if not config.creating:
        if 'CWProperty' in self.schema:
            self.vreg.init_properties(self.properties())
        # call source's init method to complete their initialisation if
        # needed (for instance looking for persistent configuration using an
        # internal session, which is not possible until pools have been
        # initialized)
        for source in self.sources:
            source.init()
        # call application level initialisation hooks
        self.hm.call_hooks('server_startup', repo=self)
        # register a task to cleanup expired session
        self.looping_task(self.config['session-time']/3.,
                          self.clean_sessions)
    else:
        # call init_creating so for instance native source can configurate
        # tsearch according to postgres version
        for source in self.sources:
            source.init_creating()
    # close initialization pool and reopen fresh ones for proper
    # initialization now that we know cubes
    self._get_pool().close(True)
    for i in xrange(config['connections-pool-size']):
        self._available_pools.put_nowait(ConnectionsPool(self.sources))
    self._shutting_down = False
1482 | 228 |
|
0 | 229 |
# internals ############################################################### |
230 |
||
231 |
def get_source(self, uri, source_config):
    """build and return the source object for `uri` from its configuration
    dictionary (delegates to the module-level sources factory)
    """
    # the source needs to know its own uri
    source_config['uri'] = uri
    return get_source(source_config, self.schema, self)
|
1482 | 234 |
|
0 | 235 |
def set_schema(self, schema, resetvreg=True):
    """set `schema` as the repository's active schema, propagating it to
    the querier, every source, the registry and the hooks manager

    if `resetvreg` is true, all application objects are reloaded against
    the new schema
    """
    schema.rebuild_infered_relations()
    self.info('set schema %s %#x', schema.name, id(schema))
    self.debug(', '.join(sorted(str(e) for e in schema.entities())))
    # NOTE: order matters below — querier and sources must know the schema
    # before it becomes the repository's official one
    self.querier.set_schema(schema)
    for source in self.sources:
        source.set_schema(schema)
    self.schema = schema
    if resetvreg:
        # full reload of all appobjects
        self.vreg.reset()
    self.vreg.set_schema(schema)
    self.hm.set_schema(schema)
    self.hm.register_system_hooks(self.config)
    # application specific hooks
    if self.config.application_hooks:
        self.info('loading application hooks')
        self.hm.register_hooks(self.config.load_hooks(self.vreg))
|
253 |
||
254 |
def fill_schema(self):
    """load schema from the repository: deserialize the application schema
    stored in the system database and install it as the active schema
    """
    from cubicweb.server.schemaserial import deserialize_schema
    self.info('loading schema from the repository')
    appschema = CubicWebSchema(self.config.appid)
    # install the bootstrap schema first: deserialization needs the core
    # entity types to be able to query the database
    self.set_bootstrap_schema(self.config.load_bootstrap_schema())
    self.debug('deserializing db schema into %s %#x', appschema.name, id(appschema))
    session = self.internal_session()
    try:
        try:
            deserialize_schema(appschema, session)
        except BadSchemaDefinition:
            raise
        except Exception, ex:
            import traceback
            traceback.print_exc()
            # re-raise with a friendlier message, keeping the original
            # traceback (py2 three-argument raise)
            raise Exception('Is the database initialised ? (cause: %s)' %
                            (ex.args and ex.args[0].strip() or 'unknown')), \
                            None, sys.exc_info()[-1]
        self.info('set the actual schema')
        # XXX have to do this since CWProperty isn't in the bootstrap schema
        # it'll be redone in set_schema
        self.set_bootstrap_schema(appschema)
        # 2.49 migration
        if exists(join(self.config.apphome, 'vc.conf')):
            session.set_pool()
            if not 'template' in file(join(self.config.apphome, 'vc.conf')).read():
                # remaning from cubicweb < 2.38...
                session.execute('DELETE CWProperty X WHERE X pkey "system.version.template"')
                session.commit()
    finally:
        session.close()
    self.config.init_cubes(self.get_cubes())
    self.set_schema(appschema)
|
1482 | 288 |
|
0 | 289 |
def set_bootstrap_schema(self, schema):
    """disable hooks when setting a bootstrap schema, but restore
    the configuration for the next time
    """
    config = self.config
    # XXX refactor
    hook_flags = ('core_hooks', 'usergroup_hooks', 'schema_hooks',
                  'notification_hooks', 'application_hooks')
    for flag in hook_flags:
        setattr(config, flag, False)
    self.set_schema(schema, resetvreg=False)
    for flag in hook_flags:
        setattr(config, flag, True)
|
1482 | 306 |
|
0 | 307 |
def start_looping_tasks(self):
    """instantiate and launch every registered looping task, then freeze
    the task registry so no further task may be added
    """
    assert isinstance(self._looping_tasks, list), 'already started'
    for idx, (interval, func) in enumerate(self._looping_tasks):
        task = LoopTask(interval, func)
        # replace the (interval, func) pair by the live task object
        self._looping_tasks[idx] = task
        self.info('starting task %s with interval %.2fs', task.name,
                  interval)
        task.start()
    # ensure no tasks will be further added
    self._looping_tasks = tuple(self._looping_tasks)
|
316 |
||
317 |
def looping_task(self, interval, func):
    """register a function to be called every `interval` seconds.

    looping tasks can only be registered during repository initialization,
    once done this method will fail.
    """
    # once start_looping_tasks() has run, _looping_tasks is a tuple and
    # append raises AttributeError
    try:
        self._looping_tasks.append((interval, func))
    except AttributeError:
        raise RuntimeError("can't add looping task once the repository is started")
|
327 |
||
328 |
def threaded_task(self, func):
    """start function in a separated thread"""
    # RepoThread takes the running-threads list so shutdown() can join it
    worker = RepoThread(func, self._running_threads)
    worker.start()
|
1482 | 332 |
|
0 | 333 |
#@locked
def _get_pool(self):
    """take a connections pool from the available queue, blocking up to
    5 seconds before giving up with an explicit error
    """
    errmsg = ('no pool available after 5 secs, probably either a '
              'bug in code (to many uncommited/rollbacked '
              'connections) or to much load on the server (in '
              'which case you can try to set a bigger '
              'connections pools size)')
    try:
        return self._available_pools.get(True, timeout=5)
    except Queue.Empty:
        raise Exception(errmsg)
|
1482 | 343 |
|
0 | 344 |
def _free_pool(self, pool): |
345 |
pool.rollback() |
|
346 |
self._available_pools.put_nowait(pool) |
|
347 |
||
348 |
def pinfo(self):
    """return a short debug string describing pool usage: available pool
    count, logins of sessions currently holding a pool, current thread
    """
    # XXX: session.pool is accessed from a local storage, would be interesting
    # to see if there is a pool set in any thread specific data)
    import threading
    holders = ','.join(session.user.login for session in self._sessions.values()
                       if session.pool)
    return '%s: %s (%s)' % (self._available_pools.qsize(), holders,
                            threading.currentThread())
|
356 |
def shutdown(self):
    """called on server stop event to properly close opened sessions and
    connections
    """
    self._shutting_down = True
    if isinstance(self._looping_tasks, tuple): # if tasks have been started
        for looptask in self._looping_tasks:
            self.info('canceling task %s...', looptask.name)
            looptask.cancel()
            looptask.join()
            self.info('task %s finished', looptask.name)
    for thread in self._running_threads:
        self.info('waiting thread %s...', thread.name)
        thread.join()
        self.info('thread %s finished', thread.name)
    self.hm.call_hooks('server_shutdown', repo=self)
    self.close_sessions()
    # close every pool, best-effort: a failure on one pool should not
    # prevent closing the others
    while not self._available_pools.empty():
        pool = self._available_pools.get_nowait()
        try:
            pool.close(True)
        except:
            self.exception('error while closing %s' % pool)
            continue
    if self.pyro_registered:
        # NOTE(review): pyro_unregister is not among the imports visible in
        # this chunk — presumably imported elsewhere in the file; verify
        pyro_unregister(self.config)
    # log cache efficiency statistics; ZeroDivisionError is swallowed when
    # no query was executed at all
    hits, misses = self.querier.cache_hit, self.querier.cache_miss
    try:
        self.info('rqlt st cache hit/miss: %s/%s (%s%% hits)', hits, misses,
                  (hits * 100) / (hits + misses))
        hits, misses = self.system_source.cache_hit, self.system_source.cache_miss
        self.info('sql cache hit/miss: %s/%s (%s%% hits)', hits, misses,
                  (hits * 100) / (hits + misses))
        nocache = self.system_source.no_cache
        self.info('sql cache usage: %s/%s (%s%%)', hits+ misses, nocache,
                  ((hits + misses) * 100) / (hits + misses + nocache))
    except ZeroDivisionError:
        pass
|
1482 | 394 |
|
0 | 395 |
def authenticate_user(self, session, login, password):
    """validate login / password, raise AuthenticationError on failure
    return associated CWUser instance on success
    """
    # ask each source supporting CWUser in turn; first successful
    # authentication wins
    for source in self.sources:
        if not source.support_entity('CWUser'):
            continue
        try:
            eid = source.authenticate(session, login, password)
        except AuthenticationError:
            continue
        break
    else:
        raise AuthenticationError('authentication failed with all sources')
    euser = self._build_user(session, eid)
    if self.config.consider_user_state and \
           euser.state not in euser.AUTHENTICABLE_STATES:
        raise AuthenticationError('user is not in authenticable state')
    return euser
|
413 |
||
414 |
def _build_user(self, session, eid): |
|
1398
5fe84a5f7035
rename internal entity types to have CW prefix instead of E
sylvain.thenault@logilab.fr
parents:
1372
diff
changeset
|
415 |
"""return a CWUser entity for user with the given eid""" |
5fe84a5f7035
rename internal entity types to have CW prefix instead of E
sylvain.thenault@logilab.fr
parents:
1372
diff
changeset
|
416 |
cls = self.vreg.etype_class('CWUser') |
0 | 417 |
rql = cls.fetch_rql(session.user, ['X eid %(x)s']) |
418 |
rset = session.execute(rql, {'x': eid}, 'x') |
|
419 |
assert len(rset) == 1, rset |
|
420 |
euser = rset.get_entity(0, 0) |
|
1138
22f634977c95
make pylint happy, fix some bugs on the way
sylvain.thenault@logilab.fr
parents:
1016
diff
changeset
|
421 |
# pylint: disable-msg=W0104 |
0 | 422 |
# prefetch / cache euser's groups and properties. This is especially |
423 |
# useful for internal sessions to avoid security insertions |
|
424 |
euser.groups |
|
425 |
euser.properties |
|
426 |
return euser |
|
1482 | 427 |
|
0 | 428 |
# public (dbapi) interface ################################################ |
1482 | 429 |
|
0 | 430 |
def get_schema(self):
    """return the application schema. This is a public method, not
    requiring a session id
    """
    schema = self.schema
    try:
        # necessary to support pickling used by pyro
        schema.__hashmode__ = 'pickle'
        return schema
    finally:
        # always restore the default hash mode
        schema.__hashmode__ = None
|
440 |
||
441 |
def get_cubes(self):
    """return the list of cubes used by this application. This is a
    public method, not requiring a session id.
    """
    # version checking is skipped while the repository is being created
    checkversions = not self.config.creating
    cubes = list(self.get_versions(checkversions))
    # cubicweb itself is reported by get_versions but is not a cube
    cubes.remove('cubicweb')
    return cubes
|
449 |
||
450 |
@cached
def get_versions(self, checkversions=False):
    """return a dictionary containing cubes used by this application
    as key with their version as value, including cubicweb version. This is a
    public method, not requiring a session id.

    if `checkversions` is true, raise ExecutionError when a cube's database
    version is older than the file system version (migration needed)
    """
    from logilab.common.changelog import Version
    vcconf = {}
    session = self.internal_session()
    try:
        # versions are stored as CWProperty entities keyed system.version.<cube>
        for pk, version in session.execute(
            'Any K,V WHERE P is CWProperty, P value V, P pkey K, '
            'P pkey ~="system.version.%"', build_descr=False):
            cube = pk.split('.')[-1]
            # XXX cubicweb migration
            if cube in CW_MIGRATION_MAP:
                cube = CW_MIGRATION_MAP[cube]
            version = Version(version)
            vcconf[cube] = version
            if checkversions:
                if cube != 'cubicweb':
                    fsversion = self.config.cube_version(cube)
                else:
                    fsversion = self.config.cubicweb_version()
                if version < fsversion:
                    msg = ('application has %s version %s but %s '
                           'is installed. Run "cubicweb-ctl upgrade".')
                    raise ExecutionError(msg % (cube, version, fsversion))
    finally:
        session.close()
    return vcconf
|
1482 | 481 |
|
0 | 482 |
@cached
def source_defs(self):
    """return a dictionary of source definitions suitable for public
    exposure: the manager entry is dropped and sensitive information
    is stripped from each definition
    """
    sources = self.config.sources().copy()
    # remove manager information
    sources.pop('admin', None)
    # remove sensitive information: replace every definition by a
    # stripped copy so the config's own dictionaries stay untouched
    for uri in sources.keys():
        stripped = sources[uri].copy()
        self.sources_by_uri[uri].remove_sensitive_information(stripped)
        sources[uri] = stripped
    return sources
|
493 |
||
494 |
def properties(self):
    """return a result set containing system wide properties"""
    # system wide properties are CWProperty entities with no for_user link
    session = self.internal_session()
    try:
        rql = ('Any K,V WHERE P is CWProperty,'
               'P pkey K, P value V, NOT P for_user U')
        return session.execute(rql, build_descr=False)
    finally:
        session.close()
|
503 |
||
1372
d4264cd876e1
register_user can now also set an email
Florent <florent@secondweb.fr>
parents:
1320
diff
changeset
|
504 |
def register_user(self, login, password, email=None, **kwargs):
    """check a user with the given login exists, if not create it with the
    given password. This method is designed to be used for anonymous
    registration on public web site.

    extra `kwargs` are set as attributes of the created user; return True
    on success, raise ValidationError if login or email is already used
    """
    # XXX should not be called from web interface
    session = self.internal_session()
    # for consistency, keep same error as unique check hook (although not required)
    errmsg = session._('the value "%s" is already used, use another one')
    try:
        # reject the login if it is already a user login or a known email
        if (session.execute('CWUser X WHERE X login %(login)s', {'login': login})
            or session.execute('CWUser X WHERE X use_email C, C address %(login)s',
                               {'login': login})):
            raise ValidationError(None, {'login': errmsg % login})
        # we have to create the user
        user = self.vreg.etype_class('CWUser')(session, None)
        if isinstance(password, unicode):
            # password should *always* be utf8 encoded
            password = password.encode('UTF8')
        kwargs['login'] = login
        kwargs['upassword'] = password
        user.update(kwargs)
        self.glob_add_entity(session, user)
        session.execute('SET X in_group G WHERE X eid %(x)s, G name "users"',
                        {'x': user.eid})
        # attach an email address when one is given explicitly or the login
        # itself looks like an email
        if email or '@' in login:
            d = {'login': login, 'email': email or login}
            if session.execute('EmailAddress X WHERE X address %(email)s', d):
                raise ValidationError(None, {'address': errmsg % d['email']})
            session.execute('INSERT EmailAddress X: X address %(email)s, '
                            'U primary_email X, U use_email X WHERE U login %(login)s', d)
        session.commit()
    finally:
        session.close()
    return True
1482 | 539 |
|
0 | 540 |
def connect(self, login, password, cnxprops=None):
    """open a connection for a given user

    base_url may be needed to send mails
    cnxtype indicate if this is a pyro connection or a in-memory connection

    raise `AuthenticationError` if the authentication failed
    raise `ConnectionError` if we can't open a connection

    return the new session's id
    """
    # use an internal connection
    session = self.internal_session()
    # try to get a user object
    try:
        user = self.authenticate_user(session, login, password)
    finally:
        session.close()
    # open the real session for that user and rebind the user entity to it
    session = Session(user, self, cnxprops)
    user.req = user.rset.req = session
    user.clear_related_cache()
    self._sessions[session.id] = session
    self.info('opened %s', session)
    self.hm.call_hooks('session_open', session=session)
    # commit session at this point in case write operation has been done
    # during `session_open` hooks
    session.commit()
    return session.id
|
566 |
||
567 |
def execute(self, sessionid, rqlstring, args=None, eid_key=None, build_descr=True):
    """execute a RQL query

    * rqlstring should be an unicode string or a plain ascii string
    * args the optional parameters used in the query
    * build_descr is a flag indicating if the description should be
      built on select queries
    """
    session = self._get_session(sessionid, setpool=True)
    try:
        try:
            return self.querier.execute(session, rqlstring, args, eid_key,
                                        build_descr)
        except (Unauthorized, RQLSyntaxError):
            raise
        except ValidationError, ex:
            # need ValidationError normalization here so error may pass
            # through pyro
            if hasattr(ex.entity, 'eid'):
                ex.entity = ex.entity.eid # error raised by yams
                args = list(ex.args)
                args[0] = ex.entity
                ex.args = tuple(args)
            raise
        except:
            # FIXME: check error to catch internal errors
            self.exception('unexpected error')
            raise
    finally:
        # always give the pool back, even on error
        session.reset_pool()
|
1482 | 597 |
|
0 | 598 |
def describe(self, sessionid, eid):
    """return a tuple (type, source, extid) for the entity with id <eid>"""
    session = self._get_session(sessionid, setpool=True)
    try:
        return self.type_and_source_from_eid(eid, session)
    finally:
        # release the connections pool whatever happened above
        session.reset_pool()
|
605 |
||
606 |
def check_session(self, sessionid):
    """raise `BadConnectionId` if the connection is no more valid

    (fixed docstring: `_get_session` actually raises BadConnectionId,
    there is no BadSessionId exception)
    """
    # looking the session up is enough to validate it; no pool needed
    self._get_session(sessionid, setpool=False)
|
609 |
||
610 |
def get_shared_data(self, sessionid, key, default=None, pop=False):
    """return value associated to `key` in the session's shared data
    dictionary (optionally removing it when `pop` is true)
    """
    # no pool needed, we only touch in-memory session state
    return self._get_session(sessionid, setpool=False).get_shared_data(
        key, default, pop)
|
614 |
||
615 |
def set_shared_data(self, sessionid, key, value, querydata=False):
    """set value associated to `key` in shared data

    if `querydata` is true, the value will be added to the repository
    session's query data which are cleared on commit/rollback of the current
    transaction, and won't be available through the connexion, only on the
    repository side.
    """
    # no pool needed, we only touch in-memory session state
    self._get_session(sessionid, setpool=False).set_shared_data(
        key, value, querydata)
|
625 |
||
626 |
def commit(self, sessionid):
    """commit transaction for the session with the given id"""
    self.debug('begin commit for session %s', sessionid)
    try:
        self._get_session(sessionid).commit()
    except (ValidationError, Unauthorized):
        # expected client-side errors, propagate untouched
        raise
    except:
        # anything else is an internal error: log it before re-raising
        self.exception('unexpected error')
        raise
|
1482 | 636 |
|
0 | 637 |
def rollback(self, sessionid):
    """rollback transaction for the session with the given id

    (fixed docstring: it previously said "commit", copy-pasted from the
    `commit` method above)
    """
    self.debug('begin rollback for session %s', sessionid)
    try:
        self._get_session(sessionid).rollback()
    except:
        # internal error: log it before re-raising
        self.exception('unexpected error')
        raise
|
645 |
||
1939
67e7379edd96
#343379: disturbing message on upgrade
Sylvain Thénault <sylvain.thenault@logilab.fr>
parents:
1883
diff
changeset
|
646 |
def close(self, sessionid, checkshuttingdown=True):
    """close the session with the given id

    `checkshuttingdown` is turned off by `close_sessions` so sessions can
    still be closed while the repository is shutting down.
    """
    session = self._get_session(sessionid, setpool=True,
                                checkshuttingdown=checkshuttingdown)
    # operation uncommited before close are rollbacked before hook is called
    session.rollback()
    self.hm.call_hooks('session_close', session=session)
    # commit session at this point in case write operation has been done
    # during `session_close` hooks
    session.commit()
    session.close()
    del self._sessions[sessionid]
    self.info('closed session %s for user %s', sessionid, session.user.login)
|
1482 | 659 |
|
0 | 660 |
def user_info(self, sessionid, props=None):
    """this method should be used by client to:
    * check session id validity
    * update user information on each user's request (i.e. groups and
      custom properties)
    """
    session = self._get_session(sessionid, setpool=False)
    if props:
        # propagate client-side property changes to the session
        for key, value in props.items():
            session.change_property(key, value)
    user = session.user
    return user.eid, user.login, user.groups, user.properties
|
1482 | 673 |
|
0 | 674 |
# public (inter-repository) interface ##################################### |
1482 | 675 |
|
0 | 676 |
def entities_modified_since(self, etypes, mtime):
    """function designed to be called from an external repository which
    is using this one as a rql source for synchronization, and return a
    3-uple containing :
    * the local date
    * list of (etype, eid) of entities of the given types which have been
      modified since the given timestamp (actually entities whose full text
      index content has changed)
    * list of (etype, eid) of entities of the given types which have been
      deleted since the given timestamp
    """
    session = self.internal_session()
    # snapshot the clock *before* querying so the caller can safely use it
    # as the lower bound of its next synchronization
    updatetime = datetime.now()
    try:
        modified, deleted = self.system_source.modified_entities(
            session, etypes, mtime)
        return updatetime, modified, deleted
    finally:
        session.close()
|
695 |
||
696 |
# session handling ######################################################## |
|
1482 | 697 |
|
0 | 698 |
def close_sessions(self):
    """close every opened sessions"""
    # .keys() gives a snapshot: self.close() deletes entries from
    # self._sessions while we iterate
    for sessid in self._sessions.keys():
        try:
            self.close(sessid, checkshuttingdown=False)
        except:
            # best-effort shutdown: log and keep closing the others
            self.exception('error while closing session %s' % sessid)
|
705 |
||
706 |
def clean_sessions(self):
    """close sessions not used since an amount of time specified in the
    configuration
    """
    mintime = time() - self.config['session-time']
    self.debug('cleaning session unused since %s',
               strftime('%T', localtime(mintime)))
    closed = 0
    # .values() gives a snapshot: self.close() mutates self._sessions
    for session in self._sessions.values():
        if session.timestamp < mintime:
            self.close(session.id)
            closed += 1
    return closed
|
1482 | 719 |
|
0 | 720 |
def internal_session(self, cnxprops=None):
    """return a dbapi like connection/cursor using internal user which
    have every rights on the repository. You'll *have to* commit/rollback
    or close (rollback implicitly) the session once the job's done, else
    you'll leak connections pool up to the time where no more pool is
    available, causing irremediable freeze...
    """
    isession = InternalSession(self, cnxprops)
    # grab a pool immediately so the session is ready to execute queries
    isession.set_pool()
    return isession
|
1482 | 730 |
|
1939
67e7379edd96
#343379: disturbing message on upgrade
Sylvain Thénault <sylvain.thenault@logilab.fr>
parents:
1883
diff
changeset
|
731 |
def _get_session(self, sessionid, setpool=False, checkshuttingdown=True):
    """return the session associated to the given session identifier

    (fixed docstring: it previously said "return the user", but the
    method returns the session object)

    raise `BadConnectionId` if no session matches `sessionid`; when
    `setpool` is true a connections pool is attached to the session
    before returning it.
    """
    if checkshuttingdown and self._shutting_down:
        raise Exception('Repository is shutting down')
    try:
        session = self._sessions[sessionid]
    except KeyError:
        raise BadConnectionId('No such session %s' % sessionid)
    if setpool:
        session.set_pool()
    return session
|
742 |
||
743 |
# data sources handling ################################################### |
|
744 |
# * correspondance between eid and (type, source) |
|
745 |
# * correspondance between eid and local id (i.e. specific to a given source) |
|
746 |
# * searchable text indexes |
|
1482 | 747 |
|
0 | 748 |
def type_and_source_from_eid(self, eid, session=None):
    """return a tuple (type, source, extid) for the entity with id <eid>

    results are cached in _type_source_cache; when no session is given an
    internal one is opened (and its pool released) for the lookup.
    """
    try:
        eid = typed_eid(eid)
    except ValueError:
        raise UnknownEid(eid)
    try:
        # fast path: answer already cached
        return self._type_source_cache[eid]
    except KeyError:
        if session is None:
            session = self.internal_session()
            reset_pool = True
        else:
            # caller's session, don't steal its pool
            reset_pool = False
        try:
            etype, uri, extid = self.system_source.eid_type_source(session,
                                                                   eid)
        finally:
            if reset_pool:
                session.reset_pool()
        self._type_source_cache[eid] = (etype, uri, extid)
        if uri != 'system':
            # also maintain the reverse extid -> eid mapping for
            # external sources
            self._extid_cache[(extid, uri)] = eid
        return etype, uri, extid
|
772 |
||
773 |
def clear_caches(self, eids):
    """remove type/source, extid and rql caches entries for the given eids

    called on deletion so stale information isn't served afterwards.
    """
    etcache = self._type_source_cache
    extidcache = self._extid_cache
    rqlcache = self.querier._rql_cache
    for eid in eids:
        try:
            etype, uri, extid = etcache.pop(typed_eid(eid)) # may be a string in some cases
            rqlcache.pop('%s X WHERE X eid %s' % (etype, eid), None)
            extidcache.pop((extid, uri), None)
        except KeyError:
            # not cached (or eid not convertible): still clear generic entries
            etype = None
        rqlcache.pop('Any X WHERE X eid %s' % eid, None)
        for source in self.sources:
            source.clear_eid_cache(eid, etype)
|
1482 | 787 |
|
0 | 788 |
def type_from_eid(self, eid, session=None):
    """return the type of the entity with id <eid>"""
    etype, _, _ = self.type_and_source_from_eid(eid, session)
    return etype
|
1482 | 791 |
|
0 | 792 |
def source_from_eid(self, eid, session=None):
    """return the source for the given entity's eid"""
    uri = self.type_and_source_from_eid(eid, session)[1]
    return self.sources_by_uri[uri]
|
1482 | 795 |
|
0 | 796 |
def eid2extid(self, source, eid, session=None):
    """get local id from an eid"""
    etype, uri, extid = self.type_and_source_from_eid(eid, session)
    if uri != source.uri:
        # the entity doesn't come from the given source
        raise UnknownEid(eid)
    return extid
|
803 |
||
1954 | 804 |
def extid2eid(self, source, extid, etype, session=None, insert=True,
              recreate=False):
    """get eid from a local id. An eid is attributed if no record is found

    * `insert=False` simply returns None when no mapping exists
    * `recreate=True` replays entity insertion hooks for an already mapped
      extid (used when an external source wants to recreate entities
      previously imported with a predictable ext id)
    """
    cachekey = (extid, source.uri)
    try:
        # fast path: mapping already cached
        return self._extid_cache[cachekey]
    except KeyError:
        pass
    reset_pool = False
    if session is None:
        session = self.internal_session()
        reset_pool = True
    eid = self.system_source.extid2eid(session, source, extid)
    if eid is not None:
        # mapping already recorded in the system source: cache it
        self._extid_cache[cachekey] = eid
        self._type_source_cache[eid] = (etype, source.uri, extid)
        if recreate:
            entity = source.before_entity_insertion(session, extid, etype, eid)
            entity._cw_recreating = True
            if source.should_call_hooks:
                self.hm.call_hooks('before_add_entity', etype, session, entity)
            # XXX add fti op ?
            source.after_entity_insertion(session, extid, entity)
            if source.should_call_hooks:
                self.hm.call_hooks('after_add_entity', etype, session, entity)
        if reset_pool:
            session.reset_pool()
        return eid
    if not insert:
        return
    # no link between extid and eid, create one using an internal session
    # since the current session user may not have required permissions to
    # do necessary stuff and we don't want to commit user session.
    #
    # Moreover, even if session is already an internal session but is
    # processing a commit, we have to use another one
    if not session.is_internal_session:
        session = self.internal_session()
        reset_pool = True
    try:
        eid = self.system_source.create_eid(session)
        self._extid_cache[cachekey] = eid
        self._type_source_cache[eid] = (etype, source.uri, extid)
        entity = source.before_entity_insertion(session, extid, etype, eid)
        if source.should_call_hooks:
            self.hm.call_hooks('before_add_entity', etype, session, entity)
        # XXX call add_info with complete=False ?
        self.add_info(session, entity, source, extid)
        source.after_entity_insertion(session, extid, entity)
        if source.should_call_hooks:
            self.hm.call_hooks('after_add_entity', etype, session, entity)
        else:
            # minimal meta-data
            session.execute('SET X is E WHERE X eid %(x)s, E name %(name)s',
                            {'x': entity.eid, 'name': entity.id}, 'x')
        # commit the (possibly internal) session; reset_pool tells commit
        # whether to give the pool back as well
        session.commit(reset_pool)
        return eid
    except:
        session.rollback(reset_pool)
        raise
|
1482 | 864 |
|
0 | 865 |
def add_info(self, session, entity, source, extid=None, complete=True):
    """add type and source info for an eid into the system table,
    and index the entity with the full text index

    `complete=False` skips fetching the indexable attributes first
    (callers which know the entity is already complete, or don't need
    indexation yet).
    """
    # begin by inserting eid/type/source/extid into the entities table
    self.system_source.add_info(session, entity, source, extid)
    if complete:
        # make sure indexable attributes are loaded before fti runs
        entity.complete(entity.e_schema.indexable_attributes())
    # remember the eid was created in this transaction
    new = session.transaction_data.setdefault('neweids', set())
    new.add(entity.eid)
    # now we can update the full text index
    if self.do_fti:
        FTIndexEntityOp(session, entity=entity)
    CleanupEidTypeCacheOp(session)
1482 | 879 |
|
0 | 880 |
def delete_info(self, session, eid):
    """remove repository information about the entity with the given eid:
    first the fti/cache bookkeeping, then the system source records
    """
    self._prepare_delete_info(session, eid)
    self._delete_info(session, eid)
|
1482 | 883 |
|
0 | 884 |
def _prepare_delete_info(self, session, eid):
    """prepare the repository for deletion of an entity:
    * update the fti
    * mark eid as being deleted in session info
    * setup cache update operation
    """
    self.system_source.fti_unindex_entity(session, eid)
    pendingeids = session.transaction_data.setdefault('pendingeids', set())
    pendingeids.add(eid)
    # schedule cache invalidation on transaction end
    CleanupEidTypeCacheOp(session)
|
1482 | 894 |
|
0 | 895 |
def _delete_info(self, session, eid):
    """delete system information on deletion of an entity:
    * delete all relations on this entity
    * transfer record from the entities table to the deleted_entities table
    """
    etype, uri, extid = self.type_and_source_from_eid(eid, session)
    # drop every relation involving the entity before removing its record
    self._clear_eid_relations(session, etype, eid)
    self.system_source.delete_info(session, eid, etype, uri, extid)
|
1482 | 903 |
|
0 | 904 |
def _clear_eid_relations(self, session, etype, eid):
    """when a entity is deleted, build and execute rql query to delete all
    its relations
    """
    eschema = self.schema.eschema(etype)
    restrictions = []
    for rschema, targetschemas, role in eschema.relation_definitions():
        rtype = rschema.type
        if rtype == 'identity':
            # 'identity' is a virtual relation, nothing stored to delete
            continue
        var = rtype.upper() + role.upper()
        if role == 'subject':
            # don't skip inlined relation so they are regularly
            # deleted and so hooks are correctly called
            restrictions.append('X %s %s' % (rtype, var))
        else:
            restrictions.append('%s %s X' % (var, rtype))
    rql = 'DELETE %s WHERE X eid %%(x)s' % ','.join(restrictions)
    # unsafe_execute since we suppose that if user can delete the entity,
    # he can delete all its relations without security checking
    session.unsafe_execute(rql, {'x': eid}, 'x', build_descr=False)
|
925 |
||
926 |
def index_entity(self, session, entity):
    """full text index a modified entity"""
    # track eids already indexed in this transaction to avoid double work
    done = session.transaction_data.setdefault('indexedeids', set())
    if entity.eid in done:
        self.info('skipping reindexation of %s, already done', entity.eid)
        return
    done.add(entity.eid)
    self.system_source.fti_index_entity(session, entity)
|
1482 | 934 |
|
0 | 935 |
def locate_relation_source(self, session, subject, rtype, object):
    """return the source where the relation (subject, rtype, object)
    should be stored: the common source of both ends if it supports the
    relation, else the system source (raising
    RTypeNotSupportedBySources when even it doesn't support it)
    """
    subjsource = self.source_from_eid(subject, session)
    if subjsource is self.source_from_eid(object, session) \
           and subjsource.support_relation(rtype, 1):
        return subjsource
    if not self.system_source.support_relation(rtype, 1):
        raise RTypeNotSupportedBySources(rtype)
    return self.system_source
|
1482 | 945 |
|
0 | 946 |
def locate_etype_source(self, etype):
    """return the first source supporting (write access to) the given
    entity type, raising ETypeNotSupportedBySources when none does
    """
    for candidate in self.sources:
        if candidate.support_entity(etype, 1):
            return candidate
    raise ETypeNotSupportedBySources(etype)
|
1482 | 952 |
|
0 | 953 |
def glob_add_entity(self, session, entity):
    """add an entity to the repository

    the entity eid should originaly be None and a unique eid is assigned to
    the entity instance
    """
    entity = entity.pre_add_hook()
    eschema = entity.e_schema
    etype = str(eschema)
    source = self.locate_etype_source(etype)
    # attribute an eid to the entity before calling hooks
    entity.set_eid(self.system_source.create_eid(session))
    entity._is_saved = False # entity has an eid but is not yet saved
    relations = []
    # if inlined relations are specified, fill entity's related cache to
    # avoid unnecessary queries
    for attr in entity.keys():
        rschema = eschema.subject_relation(attr)
        if not rschema.is_final(): # inlined relation
            entity.set_related_cache(attr, 'subject',
                                     entity.req.eid_rset(entity[attr]))
            relations.append((attr, entity[attr]))
    if source.should_call_hooks:
        self.hm.call_hooks('before_add_entity', etype, session, entity)
    entity.set_defaults()
    entity.check(creation=True)
    source.add_entity(session, entity)
    if source.uri != 'system':
        # external source: record the extid -> eid mapping
        extid = source.get_extid(entity)
        self._extid_cache[(str(extid), source.uri)] = entity.eid
    else:
        extid = None
    self.add_info(session, entity, source, extid, complete=False)
    entity._is_saved = True # entity has an eid and is saved
    #print 'added', entity#, entity.items()
    # trigger after_add_entity after after_add_relation
    if source.should_call_hooks:
        self.hm.call_hooks('after_add_entity', etype, session, entity)
        # call hooks for inlined relations
        for attr, value in relations:
            self.hm.call_hooks('before_add_relation', attr, session,
                               entity.eid, attr, value)
            self.hm.call_hooks('after_add_relation', attr, session,
                               entity.eid, attr, value)
    return entity.eid
|
1482 | 998 |
|
0 | 999 |
def glob_update_entity(self, session, entity):
    """replace an entity in the repository
    the type and the eid of an entity must not be changed
    """
    #print 'update', entity
    entity.check()
    etype = str(entity.e_schema)
    eschema = entity.e_schema
    only_inline_rels, need_fti_update = True, False
    relations = []
    # classify updated keys: final attributes vs inlined relations
    for attr in entity.keys():
        if attr == 'eid':
            continue
        rschema = eschema.subject_relation(attr)
        if rschema.is_final():
            if eschema.rproperty(attr, 'fulltextindexed'):
                # at least one indexable attribute changes: reindex later
                need_fti_update = True
            only_inline_rels = False
        else:
            # inlined relation
            previous_value = entity.related(attr)
            if previous_value:
                previous_value = previous_value[0][0] # got a result set
                self.hm.call_hooks('before_delete_relation', attr, session,
                                   entity.eid, attr, previous_value)
            entity.set_related_cache(attr, 'subject',
                                     entity.req.eid_rset(entity[attr]))
            relations.append((attr, entity[attr], previous_value))
    source = self.source_from_eid(entity.eid, session)
    if source.should_call_hooks:
        # call hooks for inlined relations
        for attr, value, _ in relations:
            self.hm.call_hooks('before_add_relation', attr, session,
                               entity.eid, attr, value)
        if not only_inline_rels:
            self.hm.call_hooks('before_update_entity', etype, session,
                               entity)
    source.update_entity(session, entity)
    if not only_inline_rels:
        if need_fti_update and self.do_fti:
            # reindex the entity only if this query is updating at least
            # one indexable attribute
            FTIndexEntityOp(session, entity=entity)
        if source.should_call_hooks:
            self.hm.call_hooks('after_update_entity', etype, session,
                               entity)
    if source.should_call_hooks:
        for attr, value, prevvalue in relations:
            if prevvalue:
                self.hm.call_hooks('after_delete_relation', attr, session,
                                   entity.eid, attr, prevvalue)
            del_existing_rel_if_needed(session, entity.eid, attr, value)
            self.hm.call_hooks('after_add_relation', attr, session,
                               entity.eid, attr, value)
|
1053 |
||
1054 |
def glob_delete_entity(self, session, eid):
    """delete an entity and all related entities from the repository"""
    #print 'deleting', eid
    # call delete_info before hooks
    self._prepare_delete_info(session, eid)
    etype, uri, extid = self.type_and_source_from_eid(eid, session)
    source = self.sources_by_uri[uri]
    if source.should_call_hooks:
        self.hm.call_hooks('before_delete_entity', etype, session, eid)
    self._delete_info(session, eid)
    source.delete_entity(session, etype, eid)
    if source.should_call_hooks:
        self.hm.call_hooks('after_delete_entity', etype, session, eid)
    # don't clear cache here this is done in a hook on commit
|
1482 | 1068 |
|
0 | 1069 |
def glob_add_relation(self, session, subject, rtype, object):
    """add a relation to the repository"""
    assert subject is not None
    assert rtype
    assert object is not None
    source = self.locate_relation_source(session, subject, rtype, object)
    #print 'adding', subject, rtype, object, 'to', source
    if source.should_call_hooks:
        # ensure single-cardinality relations don't end up duplicated
        del_existing_rel_if_needed(session, subject, rtype, object)
        self.hm.call_hooks('before_add_relation', rtype, session,
                           subject, rtype, object)
    source.add_relation(session, subject, rtype, object)
    if source.should_call_hooks:
        self.hm.call_hooks('after_add_relation', rtype, session,
                           subject, rtype, object)
|
1084 |
||
1085 |
def glob_delete_relation(self, session, subject, rtype, object):
    """delete a relation from the repository"""
    assert subject is not None
    assert rtype
    assert object is not None
    source = self.locate_relation_source(session, subject, rtype, object)
    #print 'delete rel', subject, rtype, object
    if source.should_call_hooks:
        self.hm.call_hooks('before_delete_relation', rtype, session,
                           subject, rtype, object)
    source.delete_relation(session, subject, rtype, object)
    if self.schema.rschema(rtype).symetric:
        # on symetric relation, we can't now in which sense it's
        # stored so try to delete both
        source.delete_relation(session, object, rtype, subject)
    if source.should_call_hooks:
        self.hm.call_hooks('after_delete_relation', rtype, session,
                           subject, rtype, object)
|
1103 |
||
1104 |
||
1105 |
# pyro handling ########################################################### |
|
1482 | 1106 |
|
0 | 1107 |
def pyro_register(self, host=''):
    """register the repository as a pyro object

    returns the pyro daemon; its request loop must be run by the caller.
    """
    from Pyro import core
    port = self.config['pyro-port']
    nshost, nsgroup = self.config['pyro-ns-host'], self.config['pyro-ns-group']
    # pyro groups are prefixed by ':'
    nsgroup = ':' + nsgroup
    core.initServer(banner=0)
    daemon = core.Daemon(host=host, port=port)
    daemon.useNameServer(self.pyro_nameserver(nshost, nsgroup))
    # use Delegation approach
    impl = core.ObjBase()
    impl.delegateTo(self)
    nsid = self.config['pyro-id'] or self.config.appid
    daemon.connect(impl, '%s.%s' % (nsgroup, nsid))
    msg = 'repository registered as a pyro object using group %s and id %s'
    self.info(msg, nsgroup, nsid)
    self.pyro_registered = True
    return daemon
|
1482 | 1125 |
|
0 | 1126 |
def pyro_nameserver(self, host=None, group=None):
    """locate and bind the the name server to the daemon"""
    from Pyro import naming, errors
    # locate the name server
    nameserver = naming.NameServerLocator().getNS(host)
    if group is not None:
        try:
            # make sure our namespace group exists
            nameserver.createGroup(group)
        except errors.NamingError:
            # the group is already there, nothing to do
            pass
    return nameserver
|
1138 |
||
1228
91ae10ffb611
* refactor ms planner (renaming, reorganization)
sylvain.thenault@logilab.fr
parents:
1217
diff
changeset
|
1139 |
# multi-sources planner helpers ########################################### |
1482 | 1140 |
|
1228
91ae10ffb611
* refactor ms planner (renaming, reorganization)
sylvain.thenault@logilab.fr
parents:
1217
diff
changeset
|
1141 |
@cached
def rel_type_sources(self, rtype):
    """return the sources which either support or explicitly refuse to
    cross the given relation type (cached per rtype)
    """
    result = []
    for source in self.sources:
        if source.support_relation(rtype) \
               or rtype in source.dont_cross_relations:
            result.append(source)
    return result
1482 | 1146 |
|
1228
91ae10ffb611
* refactor ms planner (renaming, reorganization)
sylvain.thenault@logilab.fr
parents:
1217
diff
changeset
|
1147 |
@cached
def can_cross_relation(self, rtype):
    """return the sources supporting the given relation type and declaring
    it as crossable (cached per rtype)
    """
    result = []
    for source in self.sources:
        if source.support_relation(rtype) \
               and rtype in source.cross_relations:
            result.append(source)
    return result
1482 | 1152 |
|
1228
91ae10ffb611
* refactor ms planner (renaming, reorganization)
sylvain.thenault@logilab.fr
parents:
1217
diff
changeset
|
1153 |
@cached
def is_multi_sources_relation(self, rtype):
    """return true if the given relation type is supported by at least one
    source beside the system source (cached per rtype)
    """
    for source in self.sources:
        if source is not self.system_source \
               and source.support_relation(rtype):
            return True
    return False
1482 | 1158 |
|
0 | 1159 |
|
1160 |
def pyro_unregister(config):
    """unregister the repository from the pyro name server"""
    nshost, nsgroup = config['pyro-ns-host'], config['pyro-ns-group']
    appid = config['pyro-id'] or config.appid
    from Pyro import core, naming, errors
    core.initClient(banner=False)
    try:
        nameserver = naming.NameServerLocator().getNS(nshost)
    except errors.PyroError, ex:
        # name server not responding
        config.error('can\'t locate pyro name server: %s', ex)
        return
    try:
        nameserver.unregister(':%s.%s' % (nsgroup, appid))
        config.info('%s unregistered from pyro name server', appid)
    except errors.NamingError:
        # already gone (e.g. never registered or unregistered twice)
        config.warning('%s already unregistered from pyro name server', appid)
|
1177 |
||
1178 |
||
1179 |
from logging import getLogger |
|
1180 |
from cubicweb import set_log_methods |
|
1181 |
set_log_methods(Repository, getLogger('cubicweb.repository')) |